Diffstat (limited to 'fs/fs-writeback.c')
 -rw-r--r--   fs/fs-writeback.c | 78
 1 files changed, 49 insertions, 29 deletions
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 783ed44c7cfe..c5e91225501d 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -35,6 +35,17 @@
 int nr_pdflush_threads;
 
 /*
+ * Passed into wb_writeback(), essentially a subset of writeback_control
+ */
+struct wb_writeback_args {
+	long nr_pages;
+	struct super_block *sb;
+	enum writeback_sync_modes sync_mode;
+	int for_kupdate;
+	int range_cyclic;
+};
+
+/*
  * Work items for the bdi_writeback threads
  */
 struct bdi_work {
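
The new struct simply bundles the parameters that wb_writeback() used to take individually, so callers build it on the stack and pass a single pointer. A minimal sketch of the intended calling convention, mirroring the wb_check_old_data_flush() hunk further down (the field values are illustrative, not taken from the patch):

	struct wb_writeback_args args = {
		.nr_pages	= 1024,		/* illustrative page budget */
		.sb		= NULL,		/* no super_block filter */
		.sync_mode	= WB_SYNC_NONE,
		.for_kupdate	= 0,
		.range_cyclic	= 1,
	};

	wrote = wb_writeback(wb, &args);	/* wb is the struct bdi_writeback * */
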
@@ -45,9 +56,7 @@ struct bdi_work {
 	unsigned long seen;
 	atomic_t pending;
 
-	struct super_block *sb;
-	unsigned long nr_pages;
-	enum writeback_sync_modes sync_mode;
+	struct wb_writeback_args args;
 
 	unsigned long state;
 };
@@ -69,9 +78,11 @@ static inline void bdi_work_init(struct bdi_work *work,
 				 struct writeback_control *wbc)
 {
 	INIT_RCU_HEAD(&work->rcu_head);
-	work->sb = wbc->sb;
-	work->nr_pages = wbc->nr_to_write;
-	work->sync_mode = wbc->sync_mode;
+	work->args.sb = wbc->sb;
+	work->args.nr_pages = wbc->nr_to_write;
+	work->args.sync_mode = wbc->sync_mode;
+	work->args.range_cyclic = wbc->range_cyclic;
+	work->args.for_kupdate = 0;
 	work->state = WS_USED;
 }
 
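
bdi_work_init() fills the embedded args from the caller's writeback_control and hard-wires for_kupdate to 0, since explicitly queued work is never kupdate writeback; the kupdate path builds its own wb_writeback_args in wb_check_old_data_flush() below. Roughly how a caller feeds this helper (a sketch only; example_queue_writeback() is a made-up name, not a function in this file):

	static void example_queue_writeback(struct backing_dev_info *bdi,
					    struct writeback_control *wbc)
	{
		struct bdi_work work;

		bdi_work_init(&work, wbc);	/* copies sb, nr_to_write, sync_mode, range_cyclic */
		/* ... queue 'work' on the bdi and kick the flusher thread ... */
	}
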
@@ -106,7 +117,7 @@ static void bdi_work_free(struct rcu_head *head)
 
 static void wb_work_complete(struct bdi_work *work)
 {
-	const enum writeback_sync_modes sync_mode = work->sync_mode;
+	const enum writeback_sync_modes sync_mode = work->args.sync_mode;
 
 	/*
 	 * For allocated work, we can clear the done/seen bit right here.
@@ -653,17 +664,16 @@ static inline bool over_bground_thresh(void)
  * older_than_this takes precedence over nr_to_write. So we'll only write back
  * all dirty pages if they are all attached to "old" mappings.
  */
-static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
-			 struct super_block *sb,
-			 enum writeback_sync_modes sync_mode, int for_kupdate)
+static long wb_writeback(struct bdi_writeback *wb,
+			 struct wb_writeback_args *args)
 {
 	struct writeback_control wbc = {
 		.bdi			= wb->bdi,
-		.sb			= sb,
-		.sync_mode		= sync_mode,
+		.sb			= args->sb,
+		.sync_mode		= args->sync_mode,
 		.older_than_this	= NULL,
-		.for_kupdate		= for_kupdate,
-		.range_cyclic		= 1,
+		.for_kupdate		= args->for_kupdate,
+		.range_cyclic		= args->range_cyclic,
 	};
 	unsigned long oldest_jif;
 	long wrote = 0;
@@ -673,13 +683,18 @@ static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
 		oldest_jif = jiffies -
 				msecs_to_jiffies(dirty_expire_interval * 10);
 	}
+	if (!wbc.range_cyclic) {
+		wbc.range_start = 0;
+		wbc.range_end = LLONG_MAX;
+	}
 
 	for (;;) {
 		/*
 		 * Don't flush anything for non-integrity writeback where
 		 * no nr_pages was given
 		 */
-		if (!for_kupdate && nr_pages <= 0 && sync_mode == WB_SYNC_NONE)
+		if (!args->for_kupdate && args->nr_pages <= 0 &&
+		    args->sync_mode == WB_SYNC_NONE)
 			break;
 
 		/*
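
range_cyclic used to be hard-coded to 1 here; it now follows the caller, and the block added above gives non-cyclic requests an explicit whole-file range (offset 0 through LLONG_MAX). As a rough illustration, a data-integrity pass over one super_block could be described like this (a hedged sketch, not code from the patch; sb stands for whichever super_block is being synced):

	struct wb_writeback_args args = {
		.nr_pages	= LONG_MAX,	/* no page budget: write everything */
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.for_kupdate	= 0,
		.range_cyclic	= 0,		/* wb_writeback() then uses 0..LLONG_MAX */
	};
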
@@ -687,7 +702,8 @@ static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
 		 * periodic background writeout and we are below the
 		 * background dirty threshold, don't do anything
 		 */
-		if (for_kupdate && nr_pages <= 0 && !over_bground_thresh())
+		if (args->for_kupdate && args->nr_pages <= 0 &&
+		    !over_bground_thresh())
 			break;
 
 		wbc.more_io = 0;
@@ -695,7 +711,7 @@ static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
 		wbc.nr_to_write = MAX_WRITEBACK_PAGES;
 		wbc.pages_skipped = 0;
 		writeback_inodes_wb(wb, &wbc);
-		nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
+		args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 		wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
 
 		/*
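
Each pass through the loop hands writeback_inodes_wb() a budget of MAX_WRITEBACK_PAGES via wbc.nr_to_write and then charges whatever was actually consumed against args->nr_pages, so a large request is worked off in chunks. A standalone sketch of that bookkeeping (MAX_WRITEBACK_PAGES is assumed to be 1024 here, and the dirty-page count is invented for the demo):

	#include <stdio.h>

	#define MAX_WRITEBACK_PAGES	1024	/* assumed to match the kernel's constant */

	int main(void)
	{
		long nr_pages = 2500;		/* stands in for args->nr_pages */
		long dirty = 2500;		/* pretend pages currently dirty */
		long wrote = 0;

		while (nr_pages > 0) {		/* approximates the WB_SYNC_NONE exit check */
			long nr_to_write = MAX_WRITEBACK_PAGES;	/* like wbc.nr_to_write */
			long written = dirty < nr_to_write ? dirty : nr_to_write;

			nr_to_write -= written;	/* writeback_inodes_wb() decrements per page */
			dirty -= written;

			nr_pages -= MAX_WRITEBACK_PAGES - nr_to_write;
			wrote += MAX_WRITEBACK_PAGES - nr_to_write;

			if (!written)		/* the real loop also checks wbc.more_io */
				break;
		}

		/* Three passes here: 1024 + 1024 + 452 = 2500 pages written. */
		printf("wrote %ld pages\n", wrote);
		return 0;
	}
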
@@ -749,8 +765,16 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
 			global_page_state(NR_UNSTABLE_NFS) +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 
-	if (nr_pages)
-		return wb_writeback(wb, nr_pages, NULL, WB_SYNC_NONE, 1);
+	if (nr_pages) {
+		struct wb_writeback_args args = {
+			.nr_pages	= nr_pages,
+			.sync_mode	= WB_SYNC_NONE,
+			.for_kupdate	= 1,
+			.range_cyclic	= 1,
+		};
+
+		return wb_writeback(wb, &args);
+	}
 
 	return 0;
 }
@@ -762,35 +786,31 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
 {
 	struct backing_dev_info *bdi = wb->bdi;
 	struct bdi_work *work;
-	long nr_pages, wrote = 0;
+	long wrote = 0;
 
 	while ((work = get_next_work_item(bdi, wb)) != NULL) {
-		enum writeback_sync_modes sync_mode;
-
-		nr_pages = work->nr_pages;
+		struct wb_writeback_args args = work->args;
 
 		/*
 		 * Override sync mode, in case we must wait for completion
 		 */
 		if (force_wait)
-			work->sync_mode = sync_mode = WB_SYNC_ALL;
-		else
-			sync_mode = work->sync_mode;
+			work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
 
 		/*
 		 * If this isn't a data integrity operation, just notify
 		 * that we have seen this work and we are now starting it.
 		 */
-		if (sync_mode == WB_SYNC_NONE)
+		if (args.sync_mode == WB_SYNC_NONE)
 			wb_clear_pending(wb, work);
 
-		wrote += wb_writeback(wb, nr_pages, work->sb, sync_mode, 0);
+		wrote += wb_writeback(wb, &args);
 
 		/*
 		 * This is a data integrity writeback, so only do the
 		 * notification when we have completed the work.
 		 */
-		if (sync_mode == WB_SYNC_ALL)
+		if (args.sync_mode == WB_SYNC_ALL)
 			wb_clear_pending(wb, work);
 	}
 
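
wb_do_writeback() now takes a by-value copy of work->args up front, so wb_writeback() decrements nr_pages on a private copy; that matters because for WB_SYNC_NONE the work item may already have been released via wb_clear_pending() by the time the writeback runs. A tiny userspace illustration of that copy semantics (plain C, the names are made up for the demo):

	#include <assert.h>

	struct demo_args {
		long nr_pages;
	};

	/* Plays the role of wb_writeback() charging written pages to its argument. */
	static void consume(struct demo_args *args)
	{
		args->nr_pages -= 100;
	}

	int main(void)
	{
		struct demo_args queued = { .nr_pages = 1024 };	/* like work->args */
		struct demo_args local = queued;	/* struct copy, as in the hunk above */

		consume(&local);
		assert(queued.nr_pages == 1024);	/* the queued item is left untouched */
		assert(local.nr_pages == 924);		/* only the private copy is consumed */
		return 0;
	}
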