 fs/fs-writeback.c           |  2
 fs/fuse/file.c              | 12
 fs/nfs/internal.h           |  2
 fs/nfs/write.c              |  3
 include/linux/backing-dev.h | 68
 mm/backing-dev.c            | 60
 mm/page-writeback.c         | 55
 7 files changed, 106 insertions(+), 96 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 983312cea245..8873ecd1578c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -840,7 +840,7 @@ static bool over_bground_thresh(struct backing_dev_info *bdi)
 	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
 		return true;
 
-	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
+	if (wb_stat(&bdi->wb, WB_RECLAIMABLE) >
 				bdi_dirty_limit(bdi, background_thresh))
 		return true;
 
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 5ef05b5c4cff..8c5e2fa68835 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1445,9 +1445,9 @@ static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req)
 
 	list_del(&req->writepages_entry);
 	for (i = 0; i < req->num_pages; i++) {
-		dec_bdi_stat(bdi, BDI_WRITEBACK);
+		dec_wb_stat(&bdi->wb, WB_WRITEBACK);
 		dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP);
-		bdi_writeout_inc(bdi);
+		wb_writeout_inc(&bdi->wb);
 	}
 	wake_up(&fi->page_waitq);
 }
@@ -1634,7 +1634,7 @@ static int fuse_writepage_locked(struct page *page)
 	req->end = fuse_writepage_end;
 	req->inode = inode;
 
-	inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
+	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
 	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
 
 	spin_lock(&fc->lock);
@@ -1749,9 +1749,9 @@ static bool fuse_writepage_in_flight(struct fuse_req *new_req,
 	copy_highpage(old_req->pages[0], page);
 	spin_unlock(&fc->lock);
 
-	dec_bdi_stat(bdi, BDI_WRITEBACK);
+	dec_wb_stat(&bdi->wb, WB_WRITEBACK);
 	dec_zone_page_state(page, NR_WRITEBACK_TEMP);
-	bdi_writeout_inc(bdi);
+	wb_writeout_inc(&bdi->wb);
 	fuse_writepage_free(fc, new_req);
 	fuse_request_free(new_req);
 	goto out;
@@ -1848,7 +1848,7 @@ static int fuse_writepages_fill(struct page *page,
 	req->page_descs[req->num_pages].offset = 0;
 	req->page_descs[req->num_pages].length = PAGE_SIZE;
 
-	inc_bdi_stat(inode_to_bdi(inode), BDI_WRITEBACK);
+	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_WRITEBACK);
 	inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP);
 
 	err = 0;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 9e6475bc5ba2..7e3c4604bea8 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -607,7 +607,7 @@ void nfs_mark_page_unstable(struct page *page)
 	struct inode *inode = page_file_mapping(page)->host;
 
 	inc_zone_page_state(page, NR_UNSTABLE_NFS);
-	inc_bdi_stat(inode_to_bdi(inode), BDI_RECLAIMABLE);
+	inc_wb_stat(&inode_to_bdi(inode)->wb, WB_RECLAIMABLE);
 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
 }
 
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index d12a4be613a5..94c7ce01dfb1 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -853,7 +853,8 @@ static void
 nfs_clear_page_commit(struct page *page)
 {
 	dec_zone_page_state(page, NR_UNSTABLE_NFS);
-	dec_bdi_stat(inode_to_bdi(page_file_mapping(page)->host), BDI_RECLAIMABLE);
+	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
+		    WB_RECLAIMABLE);
 }
 
 /* Called holding inode (/cinfo) lock */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index eb14f988a63e..fe7a907a4e16 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -36,15 +36,15 @@ enum wb_state {
 
 typedef int (congested_fn)(void *, int);
 
-enum bdi_stat_item {
-	BDI_RECLAIMABLE,
-	BDI_WRITEBACK,
-	BDI_DIRTIED,
-	BDI_WRITTEN,
-	NR_BDI_STAT_ITEMS
+enum wb_stat_item {
+	WB_RECLAIMABLE,
+	WB_WRITEBACK,
+	WB_DIRTIED,
+	WB_WRITTEN,
+	NR_WB_STAT_ITEMS
 };
 
-#define BDI_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
+#define WB_STAT_BATCH (8*(1+ilog2(nr_cpu_ids)))
 
 struct bdi_writeback {
 	struct backing_dev_info *bdi;	/* our parent bdi */
@@ -58,6 +58,8 @@ struct bdi_writeback {
 	struct list_head b_more_io;	/* parked for more writeback */
 	struct list_head b_dirty_time;	/* time stamps are dirty */
 	spinlock_t list_lock;		/* protects the b_* lists */
+
+	struct percpu_counter stat[NR_WB_STAT_ITEMS];
 };
 
 struct backing_dev_info {
@@ -69,8 +71,6 @@ struct backing_dev_info {
 
 	char *name;
 
-	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
-
 	unsigned long bw_time_stamp;	/* last time write bw is updated */
 	unsigned long dirtied_stamp;
 	unsigned long written_stamp;	/* pages written at bw_time_stamp */
@@ -137,78 +137,74 @@ static inline int wb_has_dirty_io(struct bdi_writeback *wb)
 	       !list_empty(&wb->b_more_io);
 }
 
-static inline void __add_bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item, s64 amount)
+static inline void __add_wb_stat(struct bdi_writeback *wb,
+				 enum wb_stat_item item, s64 amount)
 {
-	__percpu_counter_add(&bdi->bdi_stat[item], amount, BDI_STAT_BATCH);
+	__percpu_counter_add(&wb->stat[item], amount, WB_STAT_BATCH);
 }
 
-static inline void __inc_bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline void __inc_wb_stat(struct bdi_writeback *wb,
+				 enum wb_stat_item item)
 {
-	__add_bdi_stat(bdi, item, 1);
+	__add_wb_stat(wb, item, 1);
 }
 
-static inline void inc_bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline void inc_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__inc_bdi_stat(bdi, item);
+	__inc_wb_stat(wb, item);
 	local_irq_restore(flags);
 }
 
-static inline void __dec_bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline void __dec_wb_stat(struct bdi_writeback *wb,
+				 enum wb_stat_item item)
 {
-	__add_bdi_stat(bdi, item, -1);
+	__add_wb_stat(wb, item, -1);
 }
 
-static inline void dec_bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline void dec_wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__dec_bdi_stat(bdi, item);
+	__dec_wb_stat(wb, item);
 	local_irq_restore(flags);
 }
 
-static inline s64 bdi_stat(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline s64 wb_stat(struct bdi_writeback *wb, enum wb_stat_item item)
 {
-	return percpu_counter_read_positive(&bdi->bdi_stat[item]);
+	return percpu_counter_read_positive(&wb->stat[item]);
 }
 
-static inline s64 __bdi_stat_sum(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline s64 __wb_stat_sum(struct bdi_writeback *wb,
+				enum wb_stat_item item)
 {
-	return percpu_counter_sum_positive(&bdi->bdi_stat[item]);
+	return percpu_counter_sum_positive(&wb->stat[item]);
 }
 
-static inline s64 bdi_stat_sum(struct backing_dev_info *bdi,
-		enum bdi_stat_item item)
+static inline s64 wb_stat_sum(struct bdi_writeback *wb, enum wb_stat_item item)
 {
 	s64 sum;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	sum = __bdi_stat_sum(bdi, item);
+	sum = __wb_stat_sum(wb, item);
 	local_irq_restore(flags);
 
 	return sum;
 }
 
-extern void bdi_writeout_inc(struct backing_dev_info *bdi);
+extern void wb_writeout_inc(struct bdi_writeback *wb);
 
 /*
  * maximal error of a stat counter.
  */
-static inline unsigned long bdi_stat_error(struct backing_dev_info *bdi)
+static inline unsigned long wb_stat_error(struct bdi_writeback *wb)
 {
 #ifdef CONFIG_SMP
-	return nr_cpu_ids * BDI_STAT_BATCH;
+	return nr_cpu_ids * WB_STAT_BATCH;
 #else
 	return 1;
 #endif
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index b23cf0ea5912..7b1d1917b658 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -84,13 +84,13 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   "b_dirty_time: %10lu\n"
 		   "bdi_list: %10u\n"
 		   "state: %10lx\n",
-		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
-		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
+		   (unsigned long) K(wb_stat(wb, WB_WRITEBACK)),
+		   (unsigned long) K(wb_stat(wb, WB_RECLAIMABLE)),
 		   K(bdi_thresh),
 		   K(dirty_thresh),
 		   K(background_thresh),
-		   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
-		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
+		   (unsigned long) K(wb_stat(wb, WB_DIRTIED)),
+		   (unsigned long) K(wb_stat(wb, WB_WRITTEN)),
 		   (unsigned long) K(bdi->write_bandwidth),
 		   nr_dirty,
 		   nr_io,
@@ -376,8 +376,10 @@ void bdi_unregister(struct backing_dev_info *bdi)
 }
 EXPORT_SYMBOL(bdi_unregister);
 
-static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
+static int bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 {
+	int i, err;
+
 	memset(wb, 0, sizeof(*wb));
 
 	wb->bdi = bdi;
@@ -388,6 +390,27 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 	INIT_LIST_HEAD(&wb->b_dirty_time);
 	spin_lock_init(&wb->list_lock);
 	INIT_DELAYED_WORK(&wb->dwork, bdi_writeback_workfn);
+
+	for (i = 0; i < NR_WB_STAT_ITEMS; i++) {
+		err = percpu_counter_init(&wb->stat[i], 0, GFP_KERNEL);
+		if (err) {
+			while (--i)
+				percpu_counter_destroy(&wb->stat[i]);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+static void bdi_wb_exit(struct bdi_writeback *wb)
+{
+	int i;
+
+	WARN_ON(delayed_work_pending(&wb->dwork));
+
+	for (i = 0; i < NR_WB_STAT_ITEMS; i++)
+		percpu_counter_destroy(&wb->stat[i]);
 }
 
 /*
@@ -397,7 +420,7 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 
 int bdi_init(struct backing_dev_info *bdi)
 {
-	int i, err;
+	int err;
 
 	bdi->dev = NULL;
 
@@ -408,13 +431,9 @@ int bdi_init(struct backing_dev_info *bdi)
 	INIT_LIST_HEAD(&bdi->bdi_list);
 	INIT_LIST_HEAD(&bdi->work_list);
 
-	bdi_wb_init(&bdi->wb, bdi);
-
-	for (i = 0; i < NR_BDI_STAT_ITEMS; i++) {
-		err = percpu_counter_init(&bdi->bdi_stat[i], 0, GFP_KERNEL);
-		if (err)
-			goto err;
-	}
+	err = bdi_wb_init(&bdi->wb, bdi);
+	if (err)
+		return err;
 
 	bdi->dirty_exceeded = 0;
 
@@ -427,25 +446,20 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->avg_write_bandwidth = INIT_BW;
 
 	err = fprop_local_init_percpu(&bdi->completions, GFP_KERNEL);
-
 	if (err) {
-err:
-		while (i--)
-			percpu_counter_destroy(&bdi->bdi_stat[i]);
+		bdi_wb_exit(&bdi->wb);
+		return err;
 	}
 
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL(bdi_init);
 
 void bdi_destroy(struct backing_dev_info *bdi)
 {
-	int i;
-
 	bdi_wb_shutdown(bdi);
 
 	WARN_ON(!list_empty(&bdi->work_list));
-	WARN_ON(delayed_work_pending(&bdi->wb.dwork));
 
 	if (bdi->dev) {
 		bdi_debug_unregister(bdi);
@@ -453,8 +467,8 @@ void bdi_destroy(struct backing_dev_info *bdi)
 		bdi->dev = NULL;
 	}
 
-	for (i = 0; i < NR_BDI_STAT_ITEMS; i++)
-		percpu_counter_destroy(&bdi->bdi_stat[i]);
+	bdi_wb_exit(&bdi->wb);
+
 	fprop_local_destroy_percpu(&bdi->completions);
 }
 EXPORT_SYMBOL(bdi_destroy);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index bdeecad00489..dc673a035413 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -396,11 +396,11 @@ static unsigned long wp_next_time(unsigned long cur_time)
  * Increment the BDI's writeout completion count and the global writeout
  * completion count. Called from test_clear_page_writeback().
  */
-static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
+static inline void __wb_writeout_inc(struct bdi_writeback *wb)
 {
-	__inc_bdi_stat(bdi, BDI_WRITTEN);
-	__fprop_inc_percpu_max(&writeout_completions, &bdi->completions,
-			       bdi->max_prop_frac);
+	__inc_wb_stat(wb, WB_WRITTEN);
+	__fprop_inc_percpu_max(&writeout_completions, &wb->bdi->completions,
+			       wb->bdi->max_prop_frac);
 	/* First event after period switching was turned off? */
 	if (!unlikely(writeout_period_time)) {
 		/*
@@ -414,15 +414,15 @@ static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
 	}
 }
 
-void bdi_writeout_inc(struct backing_dev_info *bdi)
+void wb_writeout_inc(struct bdi_writeback *wb)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	__bdi_writeout_inc(bdi);
+	__wb_writeout_inc(wb);
 	local_irq_restore(flags);
 }
-EXPORT_SYMBOL_GPL(bdi_writeout_inc);
+EXPORT_SYMBOL_GPL(wb_writeout_inc);
 
 /*
  * Obtain an accurate fraction of the BDI's portion.
@@ -1130,8 +1130,8 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 	if (elapsed < BANDWIDTH_INTERVAL)
 		return;
 
-	dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]);
-	written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]);
+	dirtied = percpu_counter_read(&bdi->wb.stat[WB_DIRTIED]);
+	written = percpu_counter_read(&bdi->wb.stat[WB_WRITTEN]);
 
 	/*
 	 * Skip quiet periods when disk bandwidth is under-utilized.
@@ -1288,7 +1288,8 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
 				    unsigned long *bdi_thresh,
 				    unsigned long *bdi_bg_thresh)
 {
-	unsigned long bdi_reclaimable;
+	struct bdi_writeback *wb = &bdi->wb;
+	unsigned long wb_reclaimable;
 
 	/*
 	 * bdi_thresh is not treated as some limiting factor as
@@ -1320,14 +1321,12 @@ static inline void bdi_dirty_limits(struct backing_dev_info *bdi,
 	 * actually dirty; with m+n sitting in the percpu
 	 * deltas.
 	 */
-	if (*bdi_thresh < 2 * bdi_stat_error(bdi)) {
-		bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
-		*bdi_dirty = bdi_reclaimable +
-				bdi_stat_sum(bdi, BDI_WRITEBACK);
+	if (*bdi_thresh < 2 * wb_stat_error(wb)) {
+		wb_reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
+		*bdi_dirty = wb_reclaimable + wb_stat_sum(wb, WB_WRITEBACK);
 	} else {
-		bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
-		*bdi_dirty = bdi_reclaimable +
-				bdi_stat(bdi, BDI_WRITEBACK);
+		wb_reclaimable = wb_stat(wb, WB_RECLAIMABLE);
+		*bdi_dirty = wb_reclaimable + wb_stat(wb, WB_WRITEBACK);
 	}
 }
 
@@ -1514,9 +1513,9 @@ pause:
 			 * In theory 1 page is enough to keep the comsumer-producer
 			 * pipe going: the flusher cleans 1 page => the task dirties 1
 			 * more page. However bdi_dirty has accounting errors. So use
-			 * the larger and more IO friendly bdi_stat_error.
+			 * the larger and more IO friendly wb_stat_error.
 			 */
-			if (bdi_dirty <= bdi_stat_error(bdi))
+			if (bdi_dirty <= wb_stat_error(&bdi->wb))
 				break;
 
 			if (fatal_signal_pending(current))
@@ -2106,8 +2105,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping,
 		mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
 		__inc_zone_page_state(page, NR_FILE_DIRTY);
 		__inc_zone_page_state(page, NR_DIRTIED);
-		__inc_bdi_stat(bdi, BDI_RECLAIMABLE);
-		__inc_bdi_stat(bdi, BDI_DIRTIED);
+		__inc_wb_stat(&bdi->wb, WB_RECLAIMABLE);
+		__inc_wb_stat(&bdi->wb, WB_DIRTIED);
 		task_io_account_write(PAGE_CACHE_SIZE);
 		current->nr_dirtied++;
 		this_cpu_inc(bdp_ratelimits);
@@ -2126,7 +2125,7 @@ void account_page_cleaned(struct page *page, struct address_space *mapping,
 	if (mapping_cap_account_dirty(mapping)) {
 		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
 		dec_zone_page_state(page, NR_FILE_DIRTY);
-		dec_bdi_stat(inode_to_bdi(mapping->host), BDI_RECLAIMABLE);
+		dec_wb_stat(&inode_to_bdi(mapping->host)->wb, WB_RECLAIMABLE);
 		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
 	}
 }
@@ -2190,7 +2189,7 @@ void account_page_redirty(struct page *page)
 	if (mapping && mapping_cap_account_dirty(mapping)) {
 		current->nr_dirtied--;
 		dec_zone_page_state(page, NR_DIRTIED);
-		dec_bdi_stat(inode_to_bdi(mapping->host), BDI_DIRTIED);
+		dec_wb_stat(&inode_to_bdi(mapping->host)->wb, WB_DIRTIED);
 	}
 }
 EXPORT_SYMBOL(account_page_redirty);
@@ -2369,8 +2368,8 @@ int clear_page_dirty_for_io(struct page *page)
 		if (TestClearPageDirty(page)) {
 			mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_DIRTY);
 			dec_zone_page_state(page, NR_FILE_DIRTY);
-			dec_bdi_stat(inode_to_bdi(mapping->host),
-					BDI_RECLAIMABLE);
+			dec_wb_stat(&inode_to_bdi(mapping->host)->wb,
+				    WB_RECLAIMABLE);
 			ret = 1;
 		}
 		mem_cgroup_end_page_stat(memcg);
@@ -2398,8 +2397,8 @@ int test_clear_page_writeback(struct page *page)
 						page_index(page),
 						PAGECACHE_TAG_WRITEBACK);
 			if (bdi_cap_account_writeback(bdi)) {
-				__dec_bdi_stat(bdi, BDI_WRITEBACK);
-				__bdi_writeout_inc(bdi);
+				__dec_wb_stat(&bdi->wb, WB_WRITEBACK);
+				__wb_writeout_inc(&bdi->wb);
 			}
 		}
 		spin_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -2433,7 +2432,7 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
 						page_index(page),
 						PAGECACHE_TAG_WRITEBACK);
 			if (bdi_cap_account_writeback(bdi))
-				__inc_bdi_stat(bdi, BDI_WRITEBACK);
+				__inc_wb_stat(&bdi->wb, WB_WRITEBACK);
 		}
 		if (!PageDirty(page))
 			radix_tree_tag_clear(&mapping->page_tree,