author     Wu Fengguang <fengguang.wu@intel.com>    2011-04-21 14:06:32 -0400
committer  Wu Fengguang <fengguang.wu@intel.com>    2011-06-07 20:25:21 -0400
commit     e8dfc30582995ae12454cda517b17d6294175b07
tree       357fe8482d6d125c5b96bb398b4a588cc2c1f16f  /fs/fs-writeback.c
parent     f758eeabeb96f878c860e8f110f94ec8820822a9
writeback: elevate queue_io() into wb_writeback()
Code refactor for more logical code layout. No behavior change.

- remove the mis-named __writeback_inodes_sb()

- wb_writeback()/writeback_inodes_wb() will decide when to queue_io()
  before calling __writeback_inodes_wb()

Acked-by: Jan Kara <jack@suse.cz>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
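For orientation, after this patch queue_io() runs only under wb->list_lock in the two callers, and __writeback_inodes_wb() merely drains whatever is already queued on b_io. The following is a condensed sketch of the post-patch layout, abridged from the diff below; unrelated code is elided as /* ... */ and the snippet is not standalone-compilable kernel code.

static void __writeback_inodes_wb(struct bdi_writeback *wb,
                                  struct writeback_control *wbc)
{
        /*
         * No locking and no queue_io() here any more: the caller holds
         * wb->list_lock and has already decided whether to refill b_io.
         */
        while (!list_empty(&wb->b_io)) {
                /* ... write back the inodes queued on b_io ... */
        }
        /* Leave any unwritten inodes on b_io */
}

void writeback_inodes_wb(struct bdi_writeback *wb,
                         struct writeback_control *wbc)
{
        spin_lock(&wb->list_lock);
        if (list_empty(&wb->b_io))
                queue_io(wb, wbc->older_than_this);
        __writeback_inodes_wb(wb, wbc);
        spin_unlock(&wb->list_lock);
}

static long wb_writeback(struct bdi_writeback *wb,
                         struct wb_writeback_work *work)
{
        /* ... */
        spin_lock(&wb->list_lock);      /* now held across the whole loop */
        for (;;) {
                /* ... */
                if (list_empty(&wb->b_io))
                        queue_io(wb, wbc.older_than_this);
                if (work->sb)
                        writeback_sb_inodes(work->sb, wb, &wbc, true);
                else
                        __writeback_inodes_wb(wb, &wbc);
                /* ... */
        }
        spin_unlock(&wb->list_lock);
        /* ... */
        return wrote;
}

Holding wb->list_lock across the wb_writeback() loop is what lets the queue_io() decision be hoisted out of the lower-level helpers, which also makes the old __writeback_inodes_sb() wrapper unnecessary.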
Diffstat (limited to 'fs/fs-writeback.c')
 fs/fs-writeback.c | 29 ++++++++++++-----------------
 1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 36a30917e0dc..565b1fd15be6 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -580,17 +580,13 @@ static int writeback_sb_inodes(struct super_block *sb, struct bdi_writeback *wb,
         return 1;
 }
 
-void writeback_inodes_wb(struct bdi_writeback *wb,
-                struct writeback_control *wbc)
+static void __writeback_inodes_wb(struct bdi_writeback *wb,
+                                  struct writeback_control *wbc)
 {
         int ret = 0;
 
         if (!wbc->wb_start)
                 wbc->wb_start = jiffies; /* livelock avoidance */
-        spin_lock(&wb->list_lock);
-
-        if (list_empty(&wb->b_io))
-                queue_io(wb, wbc->older_than_this);
 
         while (!list_empty(&wb->b_io)) {
                 struct inode *inode = wb_inode(wb->b_io.prev);
@@ -606,19 +602,16 @@ void writeback_inodes_wb(struct bdi_writeback *wb,
                 if (ret)
                         break;
         }
-        spin_unlock(&wb->list_lock);
         /* Leave any unwritten inodes on b_io */
 }
 
-static void __writeback_inodes_sb(struct super_block *sb,
-                struct bdi_writeback *wb, struct writeback_control *wbc)
+void writeback_inodes_wb(struct bdi_writeback *wb,
+                struct writeback_control *wbc)
 {
-        WARN_ON(!rwsem_is_locked(&sb->s_umount));
-
         spin_lock(&wb->list_lock);
         if (list_empty(&wb->b_io))
                 queue_io(wb, wbc->older_than_this);
-        writeback_sb_inodes(sb, wb, wbc, true);
+        __writeback_inodes_wb(wb, wbc);
         spin_unlock(&wb->list_lock);
 }
 
@@ -685,7 +678,7 @@ static long wb_writeback(struct bdi_writeback *wb,
          * The intended call sequence for WB_SYNC_ALL writeback is:
          *
          * wb_writeback()
-         *     __writeback_inodes_sb()     <== called only once
+         *     writeback_sb_inodes()       <== called only once
          *         write_cache_pages()     <== called once for each inode
          *            (quickly) tag currently dirty pages
          *            (maybe slowly) sync all tagged pages
@@ -694,6 +687,7 @@ static long wb_writeback(struct bdi_writeback *wb,
                 write_chunk = LONG_MAX;
 
         wbc.wb_start = jiffies; /* livelock avoidance */
+        spin_lock(&wb->list_lock);
         for (;;) {
                 /*
                  * Stop writeback when nr_pages has been consumed
@@ -730,10 +724,12 @@ static long wb_writeback(struct bdi_writeback *wb,
                 wbc.inodes_written = 0;
 
                 trace_wbc_writeback_start(&wbc, wb->bdi);
+                if (list_empty(&wb->b_io))
+                        queue_io(wb, wbc.older_than_this);
                 if (work->sb)
-                        __writeback_inodes_sb(work->sb, wb, &wbc);
+                        writeback_sb_inodes(work->sb, wb, &wbc, true);
                 else
-                        writeback_inodes_wb(wb, &wbc);
+                        __writeback_inodes_wb(wb, &wbc);
                 trace_wbc_writeback_written(&wbc, wb->bdi);
 
                 work->nr_pages -= write_chunk - wbc.nr_to_write;
@@ -761,7 +757,6 @@ static long wb_writeback(struct bdi_writeback *wb,
                  * become available for writeback. Otherwise
                  * we'll just busyloop.
                  */
-                spin_lock(&wb->list_lock);
                 if (!list_empty(&wb->b_more_io)) {
                         inode = wb_inode(wb->b_more_io.prev);
                         trace_wbc_writeback_wait(&wbc, wb->bdi);
@@ -769,8 +764,8 @@ static long wb_writeback(struct bdi_writeback *wb,
                         inode_wait_for_writeback(inode, wb);
                         spin_unlock(&inode->i_lock);
                 }
-                spin_unlock(&wb->list_lock);
         }
+        spin_unlock(&wb->list_lock);
 
         return wrote;
 }