author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-16 10:45:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-16 10:45:38 -0400
commit		a3eb51ecfa1d7be689f05c9f12cb0fcf862c516e (patch)
tree		9995b3b9207c18a58ce99700f569b7bc2877e23c
parent		fdaa45e95d2ef59a140d2fb2e487141f83f5a07c (diff)
parent		1ef7d9aa32a8ee054c4d4fdcd2ea537c04d61b2f (diff)
Merge branch 'writeback' of git://git.kernel.dk/linux-2.6-block
* 'writeback' of git://git.kernel.dk/linux-2.6-block:
  writeback: fix possible bdi writeback refcounting problem
  writeback: Fix bdi use after free in wb_work_complete()
  writeback: improve scalability of bdi writeback work queues
  writeback: remove smp_mb(), it's not needed with list_add_tail_rcu()
  writeback: use schedule_timeout_interruptible()
  writeback: add comments to bdi_work structure
  writeback: splice dirty inode entries to default bdi on bdi_destroy()
  writeback: separate starting of sync vs opportunistic writeback
  writeback: inline allocation failure handling in bdi_alloc_queue_work()
  writeback: use RCU to protect bdi_list
  writeback: only use bdi_writeback_all() for WB_SYNC_NONE writeout
  fs: Assign bdi in super_block
  writeback: make wb_writeback() take an argument structure
  writeback: merely wakeup flusher thread if work allocation fails for WB_SYNC_NONE
  writeback: get rid of wbc->for_writepages
  fs: remove bdev->bd_inode_backing_dev_info
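[Editor's note] From a filesystem's point of view, the net effect of this series is the new super_block->s_bdi pointer and the simplified per-superblock writeback entry points: writeback_inodes_sb() and sync_inodes_sb() now return void. Below is a minimal sketch of how a filesystem adopts the interface after this merge; my_fill_super(), struct my_fs_info and my_writeout_helper() are hypothetical names used only for illustration, and the calls mirror the fuse/nfs/ubifs hunks further down.

/* Hypothetical filesystem, illustrating the post-merge interface only. */
static int my_fill_super(struct super_block *sb, void *data, int silent)
{
	struct my_fs_info *fsi = kzalloc(sizeof(*fsi), GFP_KERNEL);

	if (!fsi)
		return -ENOMEM;

	/*
	 * Point the superblock at our backing_dev_info so the per-bdi
	 * flusher threads and sync_filesystem() know where to write.
	 * (Assumes fsi->bdi was set up with bdi_init()/bdi_register()
	 * elsewhere; block-based filesystems get a default from
	 * set_bdev_super() and may leave s_bdi alone.)
	 */
	sb->s_bdi = &fsi->bdi;
	sb->s_fs_info = fsi;
	return 0;
}

static void my_writeout_helper(struct super_block *sb, int wait)
{
	if (!wait)
		writeback_inodes_sb(sb);	/* WB_SYNC_NONE, fire and forget */
	else
		sync_inodes_sb(sb);		/* WB_SYNC_ALL, writes and waits */
}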
-rw-r--r--  fs/afs/write.c                   1
-rw-r--r--  fs/block_dev.c                   1
-rw-r--r--  fs/btrfs/disk-io.c               1
-rw-r--r--  fs/btrfs/ordered-data.c          1
-rw-r--r--  fs/fs-writeback.c              345
-rw-r--r--  fs/fuse/inode.c                  2
-rw-r--r--  fs/inode.c                       4
-rw-r--r--  fs/jbd2/commit.c                 1
-rw-r--r--  fs/nfs/super.c                   2
-rw-r--r--  fs/nfs/write.c                   1
-rw-r--r--  fs/nilfs2/the_nilfs.c            4
-rw-r--r--  fs/super.c                       6
-rw-r--r--  fs/sync.c                        9
-rw-r--r--  fs/ubifs/budget.c               20
-rw-r--r--  fs/ubifs/super.c                 1
-rw-r--r--  include/linux/backing-dev.h      3
-rw-r--r--  include/linux/fs.h               2
-rw-r--r--  include/linux/writeback.h        5
-rw-r--r--  include/trace/events/ext4.h      6
-rw-r--r--  mm/backing-dev.c                90
-rw-r--r--  mm/page-writeback.c             22
21 files changed, 264 insertions, 263 deletions
diff --git a/fs/afs/write.c b/fs/afs/write.c
index c2e7a7ff008..c63a3c8beb7 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -712,7 +712,6 @@ int afs_writeback_all(struct afs_vnode *vnode)
712 .bdi = mapping->backing_dev_info, 712 .bdi = mapping->backing_dev_info,
713 .sync_mode = WB_SYNC_ALL, 713 .sync_mode = WB_SYNC_ALL,
714 .nr_to_write = LONG_MAX, 714 .nr_to_write = LONG_MAX,
715 .for_writepages = 1,
716 .range_cyclic = 1, 715 .range_cyclic = 1,
717 }; 716 };
718 int ret; 717 int ret;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 3581a4e5394..71e7e03ac34 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -420,7 +420,6 @@ static void bdev_destroy_inode(struct inode *inode)
420{ 420{
421 struct bdev_inode *bdi = BDEV_I(inode); 421 struct bdev_inode *bdi = BDEV_I(inode);
422 422
423 bdi->bdev.bd_inode_backing_dev_info = NULL;
424 kmem_cache_free(bdev_cachep, bdi); 423 kmem_cache_free(bdev_cachep, bdi);
425} 424}
426 425
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 15831d5c736..8b819279001 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1600,6 +1600,7 @@ struct btrfs_root *open_ctree(struct super_block *sb,
1600 1600
1601 sb->s_blocksize = 4096; 1601 sb->s_blocksize = 4096;
1602 sb->s_blocksize_bits = blksize_bits(4096); 1602 sb->s_blocksize_bits = blksize_bits(4096);
1603 sb->s_bdi = &fs_info->bdi;
1603 1604
1604 /* 1605 /*
1605 * we set the i_size on the btree inode to the max possible int. 1606 * we set the i_size on the btree inode to the max possible int.
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index d6f0806c682..7b2f401e604 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -740,7 +740,6 @@ int btrfs_fdatawrite_range(struct address_space *mapping, loff_t start,
740 .nr_to_write = mapping->nrpages * 2, 740 .nr_to_write = mapping->nrpages * 2,
741 .range_start = start, 741 .range_start = start,
742 .range_end = end, 742 .range_end = end,
743 .for_writepages = 1,
744 }; 743 };
745 return btrfs_writepages(mapping, &wbc); 744 return btrfs_writepages(mapping, &wbc);
746} 745}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 628235cf44b..8e1e5e19d21 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -35,21 +35,29 @@
35int nr_pdflush_threads; 35int nr_pdflush_threads;
36 36
37/* 37/*
38 * Passed into wb_writeback(), essentially a subset of writeback_control
39 */
40struct wb_writeback_args {
41 long nr_pages;
42 struct super_block *sb;
43 enum writeback_sync_modes sync_mode;
44 int for_kupdate;
45 int range_cyclic;
46};
47
48/*
38 * Work items for the bdi_writeback threads 49 * Work items for the bdi_writeback threads
39 */ 50 */
40struct bdi_work { 51struct bdi_work {
41 struct list_head list; 52 struct list_head list; /* pending work list */
42 struct list_head wait_list; 53 struct rcu_head rcu_head; /* for RCU free/clear of work */
43 struct rcu_head rcu_head;
44 54
45 unsigned long seen; 55 unsigned long seen; /* threads that have seen this work */
46 atomic_t pending; 56 atomic_t pending; /* number of threads still to do work */
47 57
48 struct super_block *sb; 58 struct wb_writeback_args args; /* writeback arguments */
49 unsigned long nr_pages;
50 enum writeback_sync_modes sync_mode;
51 59
52 unsigned long state; 60 unsigned long state; /* flag bits, see WS_* */
53}; 61};
54 62
55enum { 63enum {
@@ -66,22 +74,13 @@ static inline bool bdi_work_on_stack(struct bdi_work *work)
66} 74}
67 75
68static inline void bdi_work_init(struct bdi_work *work, 76static inline void bdi_work_init(struct bdi_work *work,
69 struct writeback_control *wbc) 77 struct wb_writeback_args *args)
70{ 78{
71 INIT_RCU_HEAD(&work->rcu_head); 79 INIT_RCU_HEAD(&work->rcu_head);
72 work->sb = wbc->sb; 80 work->args = *args;
73 work->nr_pages = wbc->nr_to_write;
74 work->sync_mode = wbc->sync_mode;
75 work->state = WS_USED; 81 work->state = WS_USED;
76} 82}
77 83
78static inline void bdi_work_init_on_stack(struct bdi_work *work,
79 struct writeback_control *wbc)
80{
81 bdi_work_init(work, wbc);
82 work->state |= WS_ONSTACK;
83}
84
85/** 84/**
86 * writeback_in_progress - determine whether there is writeback in progress 85 * writeback_in_progress - determine whether there is writeback in progress
87 * @bdi: the device's backing_dev_info structure. 86 * @bdi: the device's backing_dev_info structure.
@@ -98,6 +97,11 @@ static void bdi_work_clear(struct bdi_work *work)
98{ 97{
99 clear_bit(WS_USED_B, &work->state); 98 clear_bit(WS_USED_B, &work->state);
100 smp_mb__after_clear_bit(); 99 smp_mb__after_clear_bit();
100 /*
101 * work can have disappeared at this point. bit waitq functions
102 * should be able to tolerate this, provided bdi_sched_wait does
103 * not dereference its pointer argument.
104 */
101 wake_up_bit(&work->state, WS_USED_B); 105 wake_up_bit(&work->state, WS_USED_B);
102} 106}
103 107
@@ -113,7 +117,8 @@ static void bdi_work_free(struct rcu_head *head)
113 117
114static void wb_work_complete(struct bdi_work *work) 118static void wb_work_complete(struct bdi_work *work)
115{ 119{
116 const enum writeback_sync_modes sync_mode = work->sync_mode; 120 const enum writeback_sync_modes sync_mode = work->args.sync_mode;
121 int onstack = bdi_work_on_stack(work);
117 122
118 /* 123 /*
119 * For allocated work, we can clear the done/seen bit right here. 124 * For allocated work, we can clear the done/seen bit right here.
@@ -121,9 +126,9 @@ static void wb_work_complete(struct bdi_work *work)
121 * to after the RCU grace period, since the stack could be invalidated 126 * to after the RCU grace period, since the stack could be invalidated
122 * as soon as bdi_work_clear() has done the wakeup. 127 * as soon as bdi_work_clear() has done the wakeup.
123 */ 128 */
124 if (!bdi_work_on_stack(work)) 129 if (!onstack)
125 bdi_work_clear(work); 130 bdi_work_clear(work);
126 if (sync_mode == WB_SYNC_NONE || bdi_work_on_stack(work)) 131 if (sync_mode == WB_SYNC_NONE || onstack)
127 call_rcu(&work->rcu_head, bdi_work_free); 132 call_rcu(&work->rcu_head, bdi_work_free);
128} 133}
129 134
@@ -146,21 +151,19 @@ static void wb_clear_pending(struct bdi_writeback *wb, struct bdi_work *work)
146 151
147static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work) 152static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
148{ 153{
149 if (work) { 154 work->seen = bdi->wb_mask;
150 work->seen = bdi->wb_mask; 155 BUG_ON(!work->seen);
151 BUG_ON(!work->seen); 156 atomic_set(&work->pending, bdi->wb_cnt);
152 atomic_set(&work->pending, bdi->wb_cnt); 157 BUG_ON(!bdi->wb_cnt);
153 BUG_ON(!bdi->wb_cnt);
154
155 /*
156 * Make sure stores are seen before it appears on the list
157 */
158 smp_mb();
159 158
160 spin_lock(&bdi->wb_lock); 159 /*
161 list_add_tail_rcu(&work->list, &bdi->work_list); 160 * list_add_tail_rcu() contains the necessary barriers to
162 spin_unlock(&bdi->wb_lock); 161 * make sure the above stores are seen before the item is
163 } 162 * noticed on the list
163 */
164 spin_lock(&bdi->wb_lock);
165 list_add_tail_rcu(&work->list, &bdi->work_list);
166 spin_unlock(&bdi->wb_lock);
164 167
165 /* 168 /*
166 * If the default thread isn't there, make sure we add it. When 169 * If the default thread isn't there, make sure we add it. When
@@ -171,15 +174,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi, struct bdi_work *work)
171 else { 174 else {
172 struct bdi_writeback *wb = &bdi->wb; 175 struct bdi_writeback *wb = &bdi->wb;
173 176
174 /* 177 if (wb->task)
175 * If we failed allocating the bdi work item, wake up the wb
176 * thread always. As a safety precaution, it'll flush out
177 * everything
178 */
179 if (!wb_has_dirty_io(wb)) {
180 if (work)
181 wb_clear_pending(wb, work);
182 } else if (wb->task)
183 wake_up_process(wb->task); 178 wake_up_process(wb->task);
184 } 179 }
185} 180}
@@ -194,48 +189,75 @@ static void bdi_wait_on_work_clear(struct bdi_work *work)
194 TASK_UNINTERRUPTIBLE); 189 TASK_UNINTERRUPTIBLE);
195} 190}
196 191
197static struct bdi_work *bdi_alloc_work(struct writeback_control *wbc) 192static void bdi_alloc_queue_work(struct backing_dev_info *bdi,
193 struct wb_writeback_args *args)
198{ 194{
199 struct bdi_work *work; 195 struct bdi_work *work;
200 196
197 /*
198 * This is WB_SYNC_NONE writeback, so if allocation fails just
199 * wakeup the thread for old dirty data writeback
200 */
201 work = kmalloc(sizeof(*work), GFP_ATOMIC); 201 work = kmalloc(sizeof(*work), GFP_ATOMIC);
202 if (work) 202 if (work) {
203 bdi_work_init(work, wbc); 203 bdi_work_init(work, args);
204 bdi_queue_work(bdi, work);
205 } else {
206 struct bdi_writeback *wb = &bdi->wb;
204 207
205 return work; 208 if (wb->task)
209 wake_up_process(wb->task);
210 }
206} 211}
207 212
208void bdi_start_writeback(struct writeback_control *wbc) 213/**
214 * bdi_sync_writeback - start and wait for writeback
215 * @bdi: the backing device to write from
216 * @sb: write inodes from this super_block
217 *
218 * Description:
219 * This does WB_SYNC_ALL data integrity writeback and waits for the
220 * IO to complete. Callers must hold the sb s_umount semaphore for
221 * reading, to avoid having the super disappear before we are done.
222 */
223static void bdi_sync_writeback(struct backing_dev_info *bdi,
224 struct super_block *sb)
209{ 225{
210 const bool must_wait = wbc->sync_mode == WB_SYNC_ALL; 226 struct wb_writeback_args args = {
211 struct bdi_work work_stack, *work = NULL; 227 .sb = sb,
228 .sync_mode = WB_SYNC_ALL,
229 .nr_pages = LONG_MAX,
230 .range_cyclic = 0,
231 };
232 struct bdi_work work;
212 233
213 if (!must_wait) 234 bdi_work_init(&work, &args);
214 work = bdi_alloc_work(wbc); 235 work.state |= WS_ONSTACK;
215 236
216 if (!work) { 237 bdi_queue_work(bdi, &work);
217 work = &work_stack; 238 bdi_wait_on_work_clear(&work);
218 bdi_work_init_on_stack(work, wbc); 239}
219 }
220 240
221 bdi_queue_work(wbc->bdi, work); 241/**
242 * bdi_start_writeback - start writeback
243 * @bdi: the backing device to write from
244 * @nr_pages: the number of pages to write
245 *
246 * Description:
247 * This does WB_SYNC_NONE opportunistic writeback. The IO is only
248 * started when this function returns, we make no guarantees on
249 * completion. Caller need not hold sb s_umount semaphore.
250 *
251 */
252void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
253{
254 struct wb_writeback_args args = {
255 .sync_mode = WB_SYNC_NONE,
256 .nr_pages = nr_pages,
257 .range_cyclic = 1,
258 };
222 259
223 /* 260 bdi_alloc_queue_work(bdi, &args);
224 * If the sync mode is WB_SYNC_ALL, block waiting for the work to
225 * complete. If not, we only need to wait for the work to be started,
226 * if we allocated it on-stack. We use the same mechanism, if the
227 * wait bit is set in the bdi_work struct, then threads will not
228 * clear pending until after they are done.
229 *
230 * Note that work == &work_stack if must_wait is true, so we don't
231 * need to do call_rcu() here ever, since the completion path will
232 * have done that for us.
233 */
234 if (must_wait || work == &work_stack) {
235 bdi_wait_on_work_clear(work);
236 if (work != &work_stack)
237 call_rcu(&work->rcu_head, bdi_work_free);
238 }
239} 261}
240 262
241/* 263/*
@@ -671,17 +693,16 @@ static inline bool over_bground_thresh(void)
671 * older_than_this takes precedence over nr_to_write. So we'll only write back 693 * older_than_this takes precedence over nr_to_write. So we'll only write back
672 * all dirty pages if they are all attached to "old" mappings. 694 * all dirty pages if they are all attached to "old" mappings.
673 */ 695 */
674static long wb_writeback(struct bdi_writeback *wb, long nr_pages, 696static long wb_writeback(struct bdi_writeback *wb,
675 struct super_block *sb, 697 struct wb_writeback_args *args)
676 enum writeback_sync_modes sync_mode, int for_kupdate)
677{ 698{
678 struct writeback_control wbc = { 699 struct writeback_control wbc = {
679 .bdi = wb->bdi, 700 .bdi = wb->bdi,
680 .sb = sb, 701 .sb = args->sb,
681 .sync_mode = sync_mode, 702 .sync_mode = args->sync_mode,
682 .older_than_this = NULL, 703 .older_than_this = NULL,
683 .for_kupdate = for_kupdate, 704 .for_kupdate = args->for_kupdate,
684 .range_cyclic = 1, 705 .range_cyclic = args->range_cyclic,
685 }; 706 };
686 unsigned long oldest_jif; 707 unsigned long oldest_jif;
687 long wrote = 0; 708 long wrote = 0;
@@ -691,13 +712,18 @@ static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
691 oldest_jif = jiffies - 712 oldest_jif = jiffies -
692 msecs_to_jiffies(dirty_expire_interval * 10); 713 msecs_to_jiffies(dirty_expire_interval * 10);
693 } 714 }
715 if (!wbc.range_cyclic) {
716 wbc.range_start = 0;
717 wbc.range_end = LLONG_MAX;
718 }
694 719
695 for (;;) { 720 for (;;) {
696 /* 721 /*
697 * Don't flush anything for non-integrity writeback where 722 * Don't flush anything for non-integrity writeback where
698 * no nr_pages was given 723 * no nr_pages was given
699 */ 724 */
700 if (!for_kupdate && nr_pages <= 0 && sync_mode == WB_SYNC_NONE) 725 if (!args->for_kupdate && args->nr_pages <= 0 &&
726 args->sync_mode == WB_SYNC_NONE)
701 break; 727 break;
702 728
703 /* 729 /*
@@ -705,7 +731,8 @@ static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
705 * periodic background writeout and we are below the 731 * periodic background writeout and we are below the
706 * background dirty threshold, don't do anything 732 * background dirty threshold, don't do anything
707 */ 733 */
708 if (for_kupdate && nr_pages <= 0 && !over_bground_thresh()) 734 if (args->for_kupdate && args->nr_pages <= 0 &&
735 !over_bground_thresh())
709 break; 736 break;
710 737
711 wbc.more_io = 0; 738 wbc.more_io = 0;
@@ -713,7 +740,7 @@ static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
713 wbc.nr_to_write = MAX_WRITEBACK_PAGES; 740 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
714 wbc.pages_skipped = 0; 741 wbc.pages_skipped = 0;
715 writeback_inodes_wb(wb, &wbc); 742 writeback_inodes_wb(wb, &wbc);
716 nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write; 743 args->nr_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
717 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write; 744 wrote += MAX_WRITEBACK_PAGES - wbc.nr_to_write;
718 745
719 /* 746 /*
@@ -731,7 +758,11 @@ static long wb_writeback(struct bdi_writeback *wb, long nr_pages,
731 758
732/* 759/*
733 * Return the next bdi_work struct that hasn't been processed by this 760 * Return the next bdi_work struct that hasn't been processed by this
734 * wb thread yet 761 * wb thread yet. ->seen is initially set for each thread that exists
762 * for this device, when a thread first notices a piece of work it
763 * clears its bit. Depending on writeback type, the thread will notify
764 * completion on either receiving the work (WB_SYNC_NONE) or after
765 * it is done (WB_SYNC_ALL).
735 */ 766 */
736static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi, 767static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
737 struct bdi_writeback *wb) 768 struct bdi_writeback *wb)
@@ -741,8 +772,9 @@ static struct bdi_work *get_next_work_item(struct backing_dev_info *bdi,
741 rcu_read_lock(); 772 rcu_read_lock();
742 773
743 list_for_each_entry_rcu(work, &bdi->work_list, list) { 774 list_for_each_entry_rcu(work, &bdi->work_list, list) {
744 if (!test_and_clear_bit(wb->nr, &work->seen)) 775 if (!test_bit(wb->nr, &work->seen))
745 continue; 776 continue;
777 clear_bit(wb->nr, &work->seen);
746 778
747 ret = work; 779 ret = work;
748 break; 780 break;
@@ -767,8 +799,16 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
767 global_page_state(NR_UNSTABLE_NFS) + 799 global_page_state(NR_UNSTABLE_NFS) +
768 (inodes_stat.nr_inodes - inodes_stat.nr_unused); 800 (inodes_stat.nr_inodes - inodes_stat.nr_unused);
769 801
770 if (nr_pages) 802 if (nr_pages) {
771 return wb_writeback(wb, nr_pages, NULL, WB_SYNC_NONE, 1); 803 struct wb_writeback_args args = {
804 .nr_pages = nr_pages,
805 .sync_mode = WB_SYNC_NONE,
806 .for_kupdate = 1,
807 .range_cyclic = 1,
808 };
809
810 return wb_writeback(wb, &args);
811 }
772 812
773 return 0; 813 return 0;
774} 814}
@@ -780,35 +820,31 @@ long wb_do_writeback(struct bdi_writeback *wb, int force_wait)
780{ 820{
781 struct backing_dev_info *bdi = wb->bdi; 821 struct backing_dev_info *bdi = wb->bdi;
782 struct bdi_work *work; 822 struct bdi_work *work;
783 long nr_pages, wrote = 0; 823 long wrote = 0;
784 824
785 while ((work = get_next_work_item(bdi, wb)) != NULL) { 825 while ((work = get_next_work_item(bdi, wb)) != NULL) {
786 enum writeback_sync_modes sync_mode; 826 struct wb_writeback_args args = work->args;
787
788 nr_pages = work->nr_pages;
789 827
790 /* 828 /*
791 * Override sync mode, in case we must wait for completion 829 * Override sync mode, in case we must wait for completion
792 */ 830 */
793 if (force_wait) 831 if (force_wait)
794 work->sync_mode = sync_mode = WB_SYNC_ALL; 832 work->args.sync_mode = args.sync_mode = WB_SYNC_ALL;
795 else
796 sync_mode = work->sync_mode;
797 833
798 /* 834 /*
799 * If this isn't a data integrity operation, just notify 835 * If this isn't a data integrity operation, just notify
800 * that we have seen this work and we are now starting it. 836 * that we have seen this work and we are now starting it.
801 */ 837 */
802 if (sync_mode == WB_SYNC_NONE) 838 if (args.sync_mode == WB_SYNC_NONE)
803 wb_clear_pending(wb, work); 839 wb_clear_pending(wb, work);
804 840
805 wrote += wb_writeback(wb, nr_pages, work->sb, sync_mode, 0); 841 wrote += wb_writeback(wb, &args);
806 842
807 /* 843 /*
808 * This is a data integrity writeback, so only do the 844 * This is a data integrity writeback, so only do the
809 * notification when we have completed the work. 845 * notification when we have completed the work.
810 */ 846 */
811 if (sync_mode == WB_SYNC_ALL) 847 if (args.sync_mode == WB_SYNC_ALL)
812 wb_clear_pending(wb, work); 848 wb_clear_pending(wb, work);
813 } 849 }
814 850
@@ -849,8 +885,7 @@ int bdi_writeback_task(struct bdi_writeback *wb)
849 } 885 }
850 886
851 wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10); 887 wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
852 set_current_state(TASK_INTERRUPTIBLE); 888 schedule_timeout_interruptible(wait_jiffies);
853 schedule_timeout(wait_jiffies);
854 try_to_freeze(); 889 try_to_freeze();
855 } 890 }
856 891
@@ -858,67 +893,28 @@ int bdi_writeback_task(struct bdi_writeback *wb)
858} 893}
859 894
860/* 895/*
861 * Schedule writeback for all backing devices. Expensive! If this is a data 896 * Schedule writeback for all backing devices. This does WB_SYNC_NONE
862 * integrity operation, writeback will be complete when this returns. If 897 * writeback, for integrity writeback see bdi_sync_writeback().
863 * we are simply called for WB_SYNC_NONE, then writeback will merely be
864 * scheduled to run.
865 */ 898 */
866static void bdi_writeback_all(struct writeback_control *wbc) 899static void bdi_writeback_all(struct super_block *sb, long nr_pages)
867{ 900{
868 const bool must_wait = wbc->sync_mode == WB_SYNC_ALL; 901 struct wb_writeback_args args = {
902 .sb = sb,
903 .nr_pages = nr_pages,
904 .sync_mode = WB_SYNC_NONE,
905 };
869 struct backing_dev_info *bdi; 906 struct backing_dev_info *bdi;
870 struct bdi_work *work;
871 LIST_HEAD(list);
872
873restart:
874 spin_lock(&bdi_lock);
875 907
876 list_for_each_entry(bdi, &bdi_list, bdi_list) { 908 rcu_read_lock();
877 struct bdi_work *work;
878 909
910 list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
879 if (!bdi_has_dirty_io(bdi)) 911 if (!bdi_has_dirty_io(bdi))
880 continue; 912 continue;
881 913
882 /* 914 bdi_alloc_queue_work(bdi, &args);
883 * If work allocation fails, do the writes inline. We drop
884 * the lock and restart the list writeout. This should be OK,
885 * since this happens rarely and because the writeout should
886 * eventually make more free memory available.
887 */
888 work = bdi_alloc_work(wbc);
889 if (!work) {
890 struct writeback_control __wbc;
891
892 /*
893 * Not a data integrity writeout, just continue
894 */
895 if (!must_wait)
896 continue;
897
898 spin_unlock(&bdi_lock);
899 __wbc = *wbc;
900 __wbc.bdi = bdi;
901 writeback_inodes_wbc(&__wbc);
902 goto restart;
903 }
904 if (must_wait)
905 list_add_tail(&work->wait_list, &list);
906
907 bdi_queue_work(bdi, work);
908 } 915 }
909 916
910 spin_unlock(&bdi_lock); 917 rcu_read_unlock();
911
912 /*
913 * If this is for WB_SYNC_ALL, wait for pending work to complete
914 * before returning.
915 */
916 while (!list_empty(&list)) {
917 work = list_entry(list.next, struct bdi_work, wait_list);
918 list_del(&work->wait_list);
919 bdi_wait_on_work_clear(work);
920 call_rcu(&work->rcu_head, bdi_work_free);
921 }
922} 918}
923 919
924/* 920/*
@@ -927,17 +923,10 @@ restart:
927 */ 923 */
928void wakeup_flusher_threads(long nr_pages) 924void wakeup_flusher_threads(long nr_pages)
929{ 925{
930 struct writeback_control wbc = {
931 .sync_mode = WB_SYNC_NONE,
932 .older_than_this = NULL,
933 .range_cyclic = 1,
934 };
935
936 if (nr_pages == 0) 926 if (nr_pages == 0)
937 nr_pages = global_page_state(NR_FILE_DIRTY) + 927 nr_pages = global_page_state(NR_FILE_DIRTY) +
938 global_page_state(NR_UNSTABLE_NFS); 928 global_page_state(NR_UNSTABLE_NFS);
939 wbc.nr_to_write = nr_pages; 929 bdi_writeback_all(NULL, nr_pages);
940 bdi_writeback_all(&wbc);
941} 930}
942 931
943static noinline void block_dump___mark_inode_dirty(struct inode *inode) 932static noinline void block_dump___mark_inode_dirty(struct inode *inode)
@@ -1084,7 +1073,7 @@ EXPORT_SYMBOL(__mark_inode_dirty);
1084 * on the writer throttling path, and we get decent balancing between many 1073 * on the writer throttling path, and we get decent balancing between many
1085 * throttled threads: we don't want them all piling up on inode_sync_wait. 1074 * throttled threads: we don't want them all piling up on inode_sync_wait.
1086 */ 1075 */
1087static void wait_sb_inodes(struct writeback_control *wbc) 1076static void wait_sb_inodes(struct super_block *sb)
1088{ 1077{
1089 struct inode *inode, *old_inode = NULL; 1078 struct inode *inode, *old_inode = NULL;
1090 1079
@@ -1092,7 +1081,7 @@ static void wait_sb_inodes(struct writeback_control *wbc)
1092 * We need to be protected against the filesystem going from 1081 * We need to be protected against the filesystem going from
1093 * r/o to r/w or vice versa. 1082 * r/o to r/w or vice versa.
1094 */ 1083 */
1095 WARN_ON(!rwsem_is_locked(&wbc->sb->s_umount)); 1084 WARN_ON(!rwsem_is_locked(&sb->s_umount));
1096 1085
1097 spin_lock(&inode_lock); 1086 spin_lock(&inode_lock);
1098 1087
@@ -1103,7 +1092,7 @@ static void wait_sb_inodes(struct writeback_control *wbc)
1103 * In which case, the inode may not be on the dirty list, but 1092 * In which case, the inode may not be on the dirty list, but
1104 * we still have to wait for that writeout. 1093 * we still have to wait for that writeout.
1105 */ 1094 */
1106 list_for_each_entry(inode, &wbc->sb->s_inodes, i_sb_list) { 1095 list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
1107 struct address_space *mapping; 1096 struct address_space *mapping;
1108 1097
1109 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW)) 1098 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE|I_NEW))
@@ -1143,14 +1132,8 @@ static void wait_sb_inodes(struct writeback_control *wbc)
1143 * for IO completion of submitted IO. The number of pages submitted is 1132 * for IO completion of submitted IO. The number of pages submitted is
1144 * returned. 1133 * returned.
1145 */ 1134 */
1146long writeback_inodes_sb(struct super_block *sb) 1135void writeback_inodes_sb(struct super_block *sb)
1147{ 1136{
1148 struct writeback_control wbc = {
1149 .sb = sb,
1150 .sync_mode = WB_SYNC_NONE,
1151 .range_start = 0,
1152 .range_end = LLONG_MAX,
1153 };
1154 unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY); 1137 unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
1155 unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS); 1138 unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);
1156 long nr_to_write; 1139 long nr_to_write;
@@ -1158,9 +1141,7 @@ long writeback_inodes_sb(struct super_block *sb)
1158 nr_to_write = nr_dirty + nr_unstable + 1141 nr_to_write = nr_dirty + nr_unstable +
1159 (inodes_stat.nr_inodes - inodes_stat.nr_unused); 1142 (inodes_stat.nr_inodes - inodes_stat.nr_unused);
1160 1143
1161 wbc.nr_to_write = nr_to_write; 1144 bdi_writeback_all(sb, nr_to_write);
1162 bdi_writeback_all(&wbc);
1163 return nr_to_write - wbc.nr_to_write;
1164} 1145}
1165EXPORT_SYMBOL(writeback_inodes_sb); 1146EXPORT_SYMBOL(writeback_inodes_sb);
1166 1147
@@ -1171,20 +1152,10 @@ EXPORT_SYMBOL(writeback_inodes_sb);
1171 * This function writes and waits on any dirty inode belonging to this 1152 * This function writes and waits on any dirty inode belonging to this
1172 * super_block. The number of pages synced is returned. 1153 * super_block. The number of pages synced is returned.
1173 */ 1154 */
1174long sync_inodes_sb(struct super_block *sb) 1155void sync_inodes_sb(struct super_block *sb)
1175{ 1156{
1176 struct writeback_control wbc = { 1157 bdi_sync_writeback(sb->s_bdi, sb);
1177 .sb = sb, 1158 wait_sb_inodes(sb);
1178 .sync_mode = WB_SYNC_ALL,
1179 .range_start = 0,
1180 .range_end = LLONG_MAX,
1181 };
1182 long nr_to_write = LONG_MAX; /* doesn't actually matter */
1183
1184 wbc.nr_to_write = nr_to_write;
1185 bdi_writeback_all(&wbc);
1186 wait_sb_inodes(&wbc);
1187 return nr_to_write - wbc.nr_to_write;
1188} 1159}
1189EXPORT_SYMBOL(sync_inodes_sb); 1160EXPORT_SYMBOL(sync_inodes_sb);
1190 1161
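[Editor's note] The fs-writeback.c rework above boils down to two request paths: WB_SYNC_NONE work is copied into a kmalloc'ed bdi_work and handed off without waiting, while WB_SYNC_ALL work lives on the caller's stack and the caller blocks until the flusher thread is done with it. The condensed sketch below is not a verbatim copy of the kernel code; the example_* names are made up, and the calls match the helpers introduced in the hunks above.

/* Sketch only, condensed from bdi_start_writeback()/bdi_sync_writeback() above. */
static void example_opportunistic(struct backing_dev_info *bdi, long nr_pages)
{
	struct wb_writeback_args args = {
		.sync_mode	= WB_SYNC_NONE,
		.nr_pages	= nr_pages,
		.range_cyclic	= 1,
	};

	/*
	 * Copies args into a kmalloc'ed bdi_work and queues it; if the
	 * allocation fails it just wakes the flusher thread, which is
	 * good enough for opportunistic writeback.
	 */
	bdi_alloc_queue_work(bdi, &args);
}

static void example_integrity(struct backing_dev_info *bdi, struct super_block *sb)
{
	struct wb_writeback_args args = {
		.sb		= sb,
		.sync_mode	= WB_SYNC_ALL,
		.nr_pages	= LONG_MAX,
	};
	struct bdi_work work;

	bdi_work_init(&work, &args);
	work.state |= WS_ONSTACK;	/* completion must not kfree() this */

	bdi_queue_work(bdi, &work);
	bdi_wait_on_work_clear(&work);	/* sleep until the flusher thread is done */
}

Keeping the data integrity request on the stack avoids an allocation on the path that must not fail, and is safe because the caller waits for completion before the stack frame can go away.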
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 4567db6f943..e5dbecd87b0 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -894,6 +894,8 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
894 if (err) 894 if (err)
895 goto err_put_conn; 895 goto err_put_conn;
896 896
897 sb->s_bdi = &fc->bdi;
898
897 /* Handle umasking inside the fuse code */ 899 /* Handle umasking inside the fuse code */
898 if (sb->s_flags & MS_POSIXACL) 900 if (sb->s_flags & MS_POSIXACL)
899 fc->dont_mask = 1; 901 fc->dont_mask = 1;
diff --git a/fs/inode.c b/fs/inode.c
index ae7b67e4866..b2ba83d2c4e 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -182,9 +182,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
182 if (sb->s_bdev) { 182 if (sb->s_bdev) {
183 struct backing_dev_info *bdi; 183 struct backing_dev_info *bdi;
184 184
185 bdi = sb->s_bdev->bd_inode_backing_dev_info; 185 bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
186 if (!bdi)
187 bdi = sb->s_bdev->bd_inode->i_mapping->backing_dev_info;
188 mapping->backing_dev_info = bdi; 186 mapping->backing_dev_info = bdi;
189 } 187 }
190 inode->i_private = NULL; 188 inode->i_private = NULL;
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 7b4088b2364..0df600e9162 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -220,7 +220,6 @@ static int journal_submit_inode_data_buffers(struct address_space *mapping)
220 .nr_to_write = mapping->nrpages * 2, 220 .nr_to_write = mapping->nrpages * 2,
221 .range_start = 0, 221 .range_start = 0,
222 .range_end = i_size_read(mapping->host), 222 .range_end = i_size_read(mapping->host),
223 .for_writepages = 1,
224 }; 223 };
225 224
226 ret = generic_writepages(mapping, &wbc); 225 ret = generic_writepages(mapping, &wbc);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 867f7050453..de935692d40 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1918,6 +1918,8 @@ static inline void nfs_initialise_sb(struct super_block *sb)
1918 if (server->flags & NFS_MOUNT_NOAC) 1918 if (server->flags & NFS_MOUNT_NOAC)
1919 sb->s_flags |= MS_SYNCHRONOUS; 1919 sb->s_flags |= MS_SYNCHRONOUS;
1920 1920
1921 sb->s_bdi = &server->backing_dev_info;
1922
1921 nfs_super_set_maxbytes(sb, server->maxfilesize); 1923 nfs_super_set_maxbytes(sb, server->maxfilesize);
1922} 1924}
1923 1925
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 120acadc6a8..53eb26c16b5 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1490,7 +1490,6 @@ static int nfs_write_mapping(struct address_space *mapping, int how)
1490 .nr_to_write = LONG_MAX, 1490 .nr_to_write = LONG_MAX,
1491 .range_start = 0, 1491 .range_start = 0,
1492 .range_end = LLONG_MAX, 1492 .range_end = LLONG_MAX,
1493 .for_writepages = 1,
1494 }; 1493 };
1495 1494
1496 return __nfs_write_mapping(mapping, &wbc, how); 1495 return __nfs_write_mapping(mapping, &wbc, how);
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index d4168e269c5..ad391a8c3e7 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -591,9 +591,7 @@ int init_nilfs(struct the_nilfs *nilfs, struct nilfs_sb_info *sbi, char *data)
591 591
592 nilfs->ns_mount_state = le16_to_cpu(sbp->s_state); 592 nilfs->ns_mount_state = le16_to_cpu(sbp->s_state);
593 593
594 bdi = nilfs->ns_bdev->bd_inode_backing_dev_info; 594 bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
595 if (!bdi)
596 bdi = nilfs->ns_bdev->bd_inode->i_mapping->backing_dev_info;
597 nilfs->ns_bdi = bdi ? : &default_backing_dev_info; 595 nilfs->ns_bdi = bdi ? : &default_backing_dev_info;
598 596
599 /* Finding last segment */ 597 /* Finding last segment */
diff --git a/fs/super.c b/fs/super.c
index 9cda337ddae..b03fea8fbfb 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -707,6 +707,12 @@ static int set_bdev_super(struct super_block *s, void *data)
707{ 707{
708 s->s_bdev = data; 708 s->s_bdev = data;
709 s->s_dev = s->s_bdev->bd_dev; 709 s->s_dev = s->s_bdev->bd_dev;
710
711 /*
712 * We set the bdi here to the queue backing, file systems can
713 * overwrite this in ->fill_super()
714 */
715 s->s_bdi = &bdev_get_queue(s->s_bdev)->backing_dev_info;
710 return 0; 716 return 0;
711} 717}
712 718
diff --git a/fs/sync.c b/fs/sync.c
index 192340930bb..c08467a5d7c 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -27,6 +27,13 @@
27 */ 27 */
28static int __sync_filesystem(struct super_block *sb, int wait) 28static int __sync_filesystem(struct super_block *sb, int wait)
29{ 29{
30 /*
31 * This should be safe, as we require bdi backing to actually
32 * write out data in the first place
33 */
34 if (!sb->s_bdi)
35 return 0;
36
30 /* Avoid doing twice syncing and cache pruning for quota sync */ 37 /* Avoid doing twice syncing and cache pruning for quota sync */
31 if (!wait) { 38 if (!wait) {
32 writeout_quota_sb(sb, -1); 39 writeout_quota_sb(sb, -1);
@@ -101,7 +108,7 @@ restart:
101 spin_unlock(&sb_lock); 108 spin_unlock(&sb_lock);
102 109
103 down_read(&sb->s_umount); 110 down_read(&sb->s_umount);
104 if (!(sb->s_flags & MS_RDONLY) && sb->s_root) 111 if (!(sb->s_flags & MS_RDONLY) && sb->s_root && sb->s_bdi)
105 __sync_filesystem(sb, wait); 112 __sync_filesystem(sb, wait);
106 up_read(&sb->s_umount); 113 up_read(&sb->s_umount);
107 114
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 1c8991b0db1..ee1ce68fd98 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -54,29 +54,15 @@
54 * @nr_to_write: how many dirty pages to write-back 54 * @nr_to_write: how many dirty pages to write-back
55 * 55 *
56 * This function shrinks UBIFS liability by means of writing back some amount 56 * This function shrinks UBIFS liability by means of writing back some amount
57 * of dirty inodes and their pages. Returns the amount of pages which were 57 * of dirty inodes and their pages.
58 * written back. The returned value does not include dirty inodes which were
59 * synchronized.
60 * 58 *
61 * Note, this function synchronizes even VFS inodes which are locked 59 * Note, this function synchronizes even VFS inodes which are locked
62 * (@i_mutex) by the caller of the budgeting function, because write-back does 60 * (@i_mutex) by the caller of the budgeting function, because write-back does
63 * not touch @i_mutex. 61 * not touch @i_mutex.
64 */ 62 */
65static int shrink_liability(struct ubifs_info *c, int nr_to_write) 63static void shrink_liability(struct ubifs_info *c, int nr_to_write)
66{ 64{
67 int nr_written; 65 writeback_inodes_sb(c->vfs_sb);
68
69 nr_written = writeback_inodes_sb(c->vfs_sb);
70 if (!nr_written) {
71 /*
72 * Re-try again but wait on pages/inodes which are being
73 * written-back concurrently (e.g., by pdflush).
74 */
75 nr_written = sync_inodes_sb(c->vfs_sb);
76 }
77
78 dbg_budg("%d pages were written back", nr_written);
79 return nr_written;
80} 66}
81 67
82/** 68/**
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 51763aa8f4d..c4af069df1a 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1980,6 +1980,7 @@ static int ubifs_fill_super(struct super_block *sb, void *data, int silent)
1980 if (err) 1980 if (err)
1981 goto out_bdi; 1981 goto out_bdi;
1982 1982
1983 sb->s_bdi = &c->bdi;
1983 sb->s_fs_info = c; 1984 sb->s_fs_info = c;
1984 sb->s_magic = UBIFS_SUPER_MAGIC; 1985 sb->s_magic = UBIFS_SUPER_MAGIC;
1985 sb->s_blocksize = UBIFS_BLOCK_SIZE; 1986 sb->s_blocksize = UBIFS_BLOCK_SIZE;
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index f169bcb90b5..0ee33c2e612 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -59,6 +59,7 @@ struct bdi_writeback {
59 59
60struct backing_dev_info { 60struct backing_dev_info {
61 struct list_head bdi_list; 61 struct list_head bdi_list;
62 struct rcu_head rcu_head;
62 unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */ 63 unsigned long ra_pages; /* max readahead in PAGE_CACHE_SIZE units */
63 unsigned long state; /* Always use atomic bitops on this */ 64 unsigned long state; /* Always use atomic bitops on this */
64 unsigned int capabilities; /* Device capabilities */ 65 unsigned int capabilities; /* Device capabilities */
@@ -100,7 +101,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
100 const char *fmt, ...); 101 const char *fmt, ...);
101int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev); 102int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
102void bdi_unregister(struct backing_dev_info *bdi); 103void bdi_unregister(struct backing_dev_info *bdi);
103void bdi_start_writeback(struct writeback_control *wbc); 104void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
104int bdi_writeback_task(struct bdi_writeback *wb); 105int bdi_writeback_task(struct bdi_writeback *wb);
105int bdi_has_dirty_io(struct backing_dev_info *bdi); 106int bdi_has_dirty_io(struct backing_dev_info *bdi);
106 107
diff --git a/include/linux/fs.h b/include/linux/fs.h
index b21cf6b9c80..90162fb3bf0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -655,7 +655,6 @@ struct block_device {
655 int bd_invalidated; 655 int bd_invalidated;
656 struct gendisk * bd_disk; 656 struct gendisk * bd_disk;
657 struct list_head bd_list; 657 struct list_head bd_list;
658 struct backing_dev_info *bd_inode_backing_dev_info;
659 /* 658 /*
660 * Private data. You must have bd_claim'ed the block_device 659 * Private data. You must have bd_claim'ed the block_device
661 * to use this. NOTE: bd_claim allows an owner to claim 660 * to use this. NOTE: bd_claim allows an owner to claim
@@ -1343,6 +1342,7 @@ struct super_block {
1343 int s_nr_dentry_unused; /* # of dentry on lru */ 1342 int s_nr_dentry_unused; /* # of dentry on lru */
1344 1343
1345 struct block_device *s_bdev; 1344 struct block_device *s_bdev;
1345 struct backing_dev_info *s_bdi;
1346 struct mtd_info *s_mtd; 1346 struct mtd_info *s_mtd;
1347 struct list_head s_instances; 1347 struct list_head s_instances;
1348 struct quota_info s_dquot; /* Diskquota specific options */ 1348 struct quota_info s_dquot; /* Diskquota specific options */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index d347632f186..75cf58666ff 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -50,7 +50,6 @@ struct writeback_control {
50 unsigned encountered_congestion:1; /* An output: a queue is full */ 50 unsigned encountered_congestion:1; /* An output: a queue is full */
51 unsigned for_kupdate:1; /* A kupdate writeback */ 51 unsigned for_kupdate:1; /* A kupdate writeback */
52 unsigned for_reclaim:1; /* Invoked from the page allocator */ 52 unsigned for_reclaim:1; /* Invoked from the page allocator */
53 unsigned for_writepages:1; /* This is a writepages() call */
54 unsigned range_cyclic:1; /* range_start is cyclic */ 53 unsigned range_cyclic:1; /* range_start is cyclic */
55 unsigned more_io:1; /* more io to be dispatched */ 54 unsigned more_io:1; /* more io to be dispatched */
56 /* 55 /*
@@ -69,8 +68,8 @@ struct writeback_control {
69 */ 68 */
70struct bdi_writeback; 69struct bdi_writeback;
71int inode_wait(void *); 70int inode_wait(void *);
72long writeback_inodes_sb(struct super_block *); 71void writeback_inodes_sb(struct super_block *);
73long sync_inodes_sb(struct super_block *); 72void sync_inodes_sb(struct super_block *);
74void writeback_inodes_wbc(struct writeback_control *wbc); 73void writeback_inodes_wbc(struct writeback_control *wbc);
75long wb_do_writeback(struct bdi_writeback *wb, int force_wait); 74long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
76void wakeup_flusher_threads(long nr_pages); 75void wakeup_flusher_threads(long nr_pages);
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 7d8b5bc7418..8d433c4e370 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -227,7 +227,6 @@ TRACE_EVENT(ext4_da_writepages,
227 __field( char, nonblocking ) 227 __field( char, nonblocking )
228 __field( char, for_kupdate ) 228 __field( char, for_kupdate )
229 __field( char, for_reclaim ) 229 __field( char, for_reclaim )
230 __field( char, for_writepages )
231 __field( char, range_cyclic ) 230 __field( char, range_cyclic )
232 ), 231 ),
233 232
@@ -241,16 +240,15 @@ TRACE_EVENT(ext4_da_writepages,
241 __entry->nonblocking = wbc->nonblocking; 240 __entry->nonblocking = wbc->nonblocking;
242 __entry->for_kupdate = wbc->for_kupdate; 241 __entry->for_kupdate = wbc->for_kupdate;
243 __entry->for_reclaim = wbc->for_reclaim; 242 __entry->for_reclaim = wbc->for_reclaim;
244 __entry->for_writepages = wbc->for_writepages;
245 __entry->range_cyclic = wbc->range_cyclic; 243 __entry->range_cyclic = wbc->range_cyclic;
246 ), 244 ),
247 245
248 TP_printk("dev %s ino %lu nr_t_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d for_writepages %d range_cyclic %d", 246 TP_printk("dev %s ino %lu nr_t_write %ld pages_skipped %ld range_start %llu range_end %llu nonblocking %d for_kupdate %d for_reclaim %d range_cyclic %d",
249 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->nr_to_write, 247 jbd2_dev_to_name(__entry->dev), __entry->ino, __entry->nr_to_write,
250 __entry->pages_skipped, __entry->range_start, 248 __entry->pages_skipped, __entry->range_start,
251 __entry->range_end, __entry->nonblocking, 249 __entry->range_end, __entry->nonblocking,
252 __entry->for_kupdate, __entry->for_reclaim, 250 __entry->for_kupdate, __entry->for_reclaim,
253 __entry->for_writepages, __entry->range_cyclic) 251 __entry->range_cyclic)
254); 252);
255 253
256TRACE_EVENT(ext4_da_writepages_result, 254TRACE_EVENT(ext4_da_writepages_result,
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index d3ca0dac111..3d3accb1f80 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -26,6 +26,12 @@ struct backing_dev_info default_backing_dev_info = {
26EXPORT_SYMBOL_GPL(default_backing_dev_info); 26EXPORT_SYMBOL_GPL(default_backing_dev_info);
27 27
28static struct class *bdi_class; 28static struct class *bdi_class;
29
30/*
31 * bdi_lock protects updates to bdi_list and bdi_pending_list, as well as
32 * reader side protection for bdi_pending_list. bdi_list has RCU reader side
33 * locking.
34 */
29DEFINE_SPINLOCK(bdi_lock); 35DEFINE_SPINLOCK(bdi_lock);
30LIST_HEAD(bdi_list); 36LIST_HEAD(bdi_list);
31LIST_HEAD(bdi_pending_list); 37LIST_HEAD(bdi_pending_list);
@@ -284,9 +290,9 @@ static int bdi_start_fn(void *ptr)
284 /* 290 /*
285 * Add us to the active bdi_list 291 * Add us to the active bdi_list
286 */ 292 */
287 spin_lock(&bdi_lock); 293 spin_lock_bh(&bdi_lock);
288 list_add(&bdi->bdi_list, &bdi_list); 294 list_add_rcu(&bdi->bdi_list, &bdi_list);
289 spin_unlock(&bdi_lock); 295 spin_unlock_bh(&bdi_lock);
290 296
291 bdi_task_init(bdi, wb); 297 bdi_task_init(bdi, wb);
292 298
@@ -389,7 +395,7 @@ static int bdi_forker_task(void *ptr)
389 if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list)) 395 if (wb_has_dirty_io(me) || !list_empty(&me->bdi->work_list))
390 wb_do_writeback(me, 0); 396 wb_do_writeback(me, 0);
391 397
392 spin_lock(&bdi_lock); 398 spin_lock_bh(&bdi_lock);
393 399
394 /* 400 /*
395 * Check if any existing bdi's have dirty data without 401 * Check if any existing bdi's have dirty data without
@@ -410,7 +416,7 @@ static int bdi_forker_task(void *ptr)
410 if (list_empty(&bdi_pending_list)) { 416 if (list_empty(&bdi_pending_list)) {
411 unsigned long wait; 417 unsigned long wait;
412 418
413 spin_unlock(&bdi_lock); 419 spin_unlock_bh(&bdi_lock);
414 wait = msecs_to_jiffies(dirty_writeback_interval * 10); 420 wait = msecs_to_jiffies(dirty_writeback_interval * 10);
415 schedule_timeout(wait); 421 schedule_timeout(wait);
416 try_to_freeze(); 422 try_to_freeze();
@@ -426,7 +432,7 @@ static int bdi_forker_task(void *ptr)
426 bdi = list_entry(bdi_pending_list.next, struct backing_dev_info, 432 bdi = list_entry(bdi_pending_list.next, struct backing_dev_info,
427 bdi_list); 433 bdi_list);
428 list_del_init(&bdi->bdi_list); 434 list_del_init(&bdi->bdi_list);
429 spin_unlock(&bdi_lock); 435 spin_unlock_bh(&bdi_lock);
430 436
431 wb = &bdi->wb; 437 wb = &bdi->wb;
432 wb->task = kthread_run(bdi_start_fn, wb, "flush-%s", 438 wb->task = kthread_run(bdi_start_fn, wb, "flush-%s",
@@ -445,9 +451,9 @@ static int bdi_forker_task(void *ptr)
445 * a chance to flush other bdi's to free 451 * a chance to flush other bdi's to free
446 * memory. 452 * memory.
447 */ 453 */
448 spin_lock(&bdi_lock); 454 spin_lock_bh(&bdi_lock);
449 list_add_tail(&bdi->bdi_list, &bdi_pending_list); 455 list_add_tail(&bdi->bdi_list, &bdi_pending_list);
450 spin_unlock(&bdi_lock); 456 spin_unlock_bh(&bdi_lock);
451 457
452 bdi_flush_io(bdi); 458 bdi_flush_io(bdi);
453 } 459 }
@@ -456,6 +462,24 @@ static int bdi_forker_task(void *ptr)
456 return 0; 462 return 0;
457} 463}
458 464
465static void bdi_add_to_pending(struct rcu_head *head)
466{
467 struct backing_dev_info *bdi;
468
469 bdi = container_of(head, struct backing_dev_info, rcu_head);
470 INIT_LIST_HEAD(&bdi->bdi_list);
471
472 spin_lock(&bdi_lock);
473 list_add_tail(&bdi->bdi_list, &bdi_pending_list);
474 spin_unlock(&bdi_lock);
475
476 /*
477 * We are now on the pending list, wake up bdi_forker_task()
478 * to finish the job and add us back to the active bdi_list
479 */
480 wake_up_process(default_backing_dev_info.wb.task);
481}
482
459/* 483/*
460 * Add the default flusher task that gets created for any bdi 484 * Add the default flusher task that gets created for any bdi
461 * that has dirty data pending writeout 485 * that has dirty data pending writeout
@@ -478,16 +502,29 @@ void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
478 * waiting for previous additions to finish. 502 * waiting for previous additions to finish.
479 */ 503 */
480 if (!test_and_set_bit(BDI_pending, &bdi->state)) { 504 if (!test_and_set_bit(BDI_pending, &bdi->state)) {
481 list_move_tail(&bdi->bdi_list, &bdi_pending_list); 505 list_del_rcu(&bdi->bdi_list);
482 506
483 /* 507 /*
484 * We are now on the pending list, wake up bdi_forker_task() 508 * We must wait for the current RCU period to end before
485 * to finish the job and add us back to the active bdi_list 509 * moving to the pending list. So schedule that operation
510 * from an RCU callback.
486 */ 511 */
487 wake_up_process(default_backing_dev_info.wb.task); 512 call_rcu(&bdi->rcu_head, bdi_add_to_pending);
488 } 513 }
489} 514}
490 515
516/*
517 * Remove bdi from bdi_list, and ensure that it is no longer visible
518 */
519static void bdi_remove_from_list(struct backing_dev_info *bdi)
520{
521 spin_lock_bh(&bdi_lock);
522 list_del_rcu(&bdi->bdi_list);
523 spin_unlock_bh(&bdi_lock);
524
525 synchronize_rcu();
526}
527
491int bdi_register(struct backing_dev_info *bdi, struct device *parent, 528int bdi_register(struct backing_dev_info *bdi, struct device *parent,
492 const char *fmt, ...) 529 const char *fmt, ...)
493{ 530{
@@ -506,9 +543,9 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
506 goto exit; 543 goto exit;
507 } 544 }
508 545
509 spin_lock(&bdi_lock); 546 spin_lock_bh(&bdi_lock);
510 list_add_tail(&bdi->bdi_list, &bdi_list); 547 list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
511 spin_unlock(&bdi_lock); 548 spin_unlock_bh(&bdi_lock);
512 549
513 bdi->dev = dev; 550 bdi->dev = dev;
514 551
@@ -526,9 +563,7 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
526 wb->task = NULL; 563 wb->task = NULL;
527 ret = -ENOMEM; 564 ret = -ENOMEM;
528 565
529 spin_lock(&bdi_lock); 566 bdi_remove_from_list(bdi);
530 list_del(&bdi->bdi_list);
531 spin_unlock(&bdi_lock);
532 goto exit; 567 goto exit;
533 } 568 }
534 } 569 }
@@ -565,9 +600,7 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
565 /* 600 /*
566 * Make sure nobody finds us on the bdi_list anymore 601 * Make sure nobody finds us on the bdi_list anymore
567 */ 602 */
568 spin_lock(&bdi_lock); 603 bdi_remove_from_list(bdi);
569 list_del(&bdi->bdi_list);
570 spin_unlock(&bdi_lock);
571 604
572 /* 605 /*
573 * Finally, kill the kernel threads. We don't need to be RCU 606 * Finally, kill the kernel threads. We don't need to be RCU
@@ -599,6 +632,7 @@ int bdi_init(struct backing_dev_info *bdi)
599 bdi->max_ratio = 100; 632 bdi->max_ratio = 100;
600 bdi->max_prop_frac = PROP_FRAC_BASE; 633 bdi->max_prop_frac = PROP_FRAC_BASE;
601 spin_lock_init(&bdi->wb_lock); 634 spin_lock_init(&bdi->wb_lock);
635 INIT_RCU_HEAD(&bdi->rcu_head);
602 INIT_LIST_HEAD(&bdi->bdi_list); 636 INIT_LIST_HEAD(&bdi->bdi_list);
603 INIT_LIST_HEAD(&bdi->wb_list); 637 INIT_LIST_HEAD(&bdi->wb_list);
604 INIT_LIST_HEAD(&bdi->work_list); 638 INIT_LIST_HEAD(&bdi->work_list);
@@ -634,7 +668,19 @@ void bdi_destroy(struct backing_dev_info *bdi)
634{ 668{
635 int i; 669 int i;
636 670
637 WARN_ON(bdi_has_dirty_io(bdi)); 671 /*
672 * Splice our entries to the default_backing_dev_info, if this
673 * bdi disappears
674 */
675 if (bdi_has_dirty_io(bdi)) {
676 struct bdi_writeback *dst = &default_backing_dev_info.wb;
677
678 spin_lock(&inode_lock);
679 list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
680 list_splice(&bdi->wb.b_io, &dst->b_io);
681 list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
682 spin_unlock(&inode_lock);
683 }
638 684
639 bdi_unregister(bdi); 685 bdi_unregister(bdi);
640 686
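[Editor's note] The mm/backing-dev.c changes above switch bdi_list from a purely spinlock-protected list to an RCU-protected one: readers such as bdi_writeback_all() walk it under rcu_read_lock(), writers take bdi_lock with bottom halves disabled and use the _rcu list primitives, and removal waits out a grace period before the bdi can go away. The fragment below is a schematic recap of that pattern, not literal kernel code.

	/* Reader side, e.g. bdi_writeback_all() walking the list lock-free: */
	struct backing_dev_info *bdi;

	rcu_read_lock();
	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
		if (!bdi_has_dirty_io(bdi))
			continue;
		/* queue WB_SYNC_NONE work for this bdi, as bdi_writeback_all() does */
	}
	rcu_read_unlock();

	/* Writer side, addition as in bdi_register(): */
	spin_lock_bh(&bdi_lock);
	list_add_tail_rcu(&bdi->bdi_list, &bdi_list);
	spin_unlock_bh(&bdi_lock);

	/* Writer side, removal as in bdi_remove_from_list(): */
	spin_lock_bh(&bdi_lock);
	list_del_rcu(&bdi->bdi_list);
	spin_unlock_bh(&bdi_lock);
	synchronize_rcu();	/* no reader can still see this bdi afterwards */

The synchronize_rcu() in bdi_remove_from_list() is what makes bdi_unregister() and the writeback shutdown path safe against concurrent lock-free readers of bdi_list.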
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index dd73d29c15a..1eea4fa0d41 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -315,7 +315,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
315{ 315{
316 int ret = 0; 316 int ret = 0;
317 317
318 spin_lock(&bdi_lock); 318 spin_lock_bh(&bdi_lock);
319 if (min_ratio > bdi->max_ratio) { 319 if (min_ratio > bdi->max_ratio) {
320 ret = -EINVAL; 320 ret = -EINVAL;
321 } else { 321 } else {
@@ -327,7 +327,7 @@ int bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ratio)
327 ret = -EINVAL; 327 ret = -EINVAL;
328 } 328 }
329 } 329 }
330 spin_unlock(&bdi_lock); 330 spin_unlock_bh(&bdi_lock);
331 331
332 return ret; 332 return ret;
333} 333}
@@ -339,14 +339,14 @@ int bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned max_ratio)
339 if (max_ratio > 100) 339 if (max_ratio > 100)
340 return -EINVAL; 340 return -EINVAL;
341 341
342 spin_lock(&bdi_lock); 342 spin_lock_bh(&bdi_lock);
343 if (bdi->min_ratio > max_ratio) { 343 if (bdi->min_ratio > max_ratio) {
344 ret = -EINVAL; 344 ret = -EINVAL;
345 } else { 345 } else {
346 bdi->max_ratio = max_ratio; 346 bdi->max_ratio = max_ratio;
347 bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100; 347 bdi->max_prop_frac = (PROP_FRAC_BASE * max_ratio) / 100;
348 } 348 }
349 spin_unlock(&bdi_lock); 349 spin_unlock_bh(&bdi_lock);
350 350
351 return ret; 351 return ret;
352} 352}
@@ -582,16 +582,8 @@ static void balance_dirty_pages(struct address_space *mapping)
582 if ((laptop_mode && pages_written) || 582 if ((laptop_mode && pages_written) ||
583 (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY) 583 (!laptop_mode && ((nr_writeback = global_page_state(NR_FILE_DIRTY)
584 + global_page_state(NR_UNSTABLE_NFS)) 584 + global_page_state(NR_UNSTABLE_NFS))
585 > background_thresh))) { 585 > background_thresh)))
586 struct writeback_control wbc = { 586 bdi_start_writeback(bdi, nr_writeback);
587 .bdi = bdi,
588 .sync_mode = WB_SYNC_NONE,
589 .nr_to_write = nr_writeback,
590 };
591
592
593 bdi_start_writeback(&wbc);
594 }
595} 587}
596 588
597void set_page_dirty_balance(struct page *page, int page_mkwrite) 589void set_page_dirty_balance(struct page *page, int page_mkwrite)
@@ -1020,12 +1012,10 @@ int do_writepages(struct address_space *mapping, struct writeback_control *wbc)
1020 1012
1021 if (wbc->nr_to_write <= 0) 1013 if (wbc->nr_to_write <= 0)
1022 return 0; 1014 return 0;
1023 wbc->for_writepages = 1;
1024 if (mapping->a_ops->writepages) 1015 if (mapping->a_ops->writepages)
1025 ret = mapping->a_ops->writepages(mapping, wbc); 1016 ret = mapping->a_ops->writepages(mapping, wbc);
1026 else 1017 else
1027 ret = generic_writepages(mapping, wbc); 1018 ret = generic_writepages(mapping, wbc);
1028 wbc->for_writepages = 0;
1029 return ret; 1019 return ret;
1030} 1020}
1031 1021