Diffstat (limited to 'mm/page-writeback.c')
 -rw-r--r--  mm/page-writeback.c | 300
 1 files changed, 251 insertions(+), 49 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d821321326e..7845462064f 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2,6 +2,7 @@
  * mm/page-writeback.c
  *
  * Copyright (C) 2002, Linus Torvalds.
+ * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
  *
  * Contains functions related to writing back dirty pages at the
  * address_space level.
@@ -36,7 +37,7 @@
 
 /*
  * The maximum number of pages to writeout in a single bdflush/kupdate
- * operation.  We do this so we don't hold I_LOCK against an inode for
+ * operation.  We do this so we don't hold I_SYNC against an inode for
  * enormous amounts of time, which would block a userspace task which has
  * been forced to throttle against that inode.  Also, the code reevaluates
  * the dirty each time it has written this many pages.
@@ -49,8 +50,6 @@
  */
 static long ratelimit_pages = 32;
 
-static int dirty_exceeded __cacheline_aligned_in_smp;	/* Dirty mem may be over limit */
-
 /*
  * When balance_dirty_pages decides that the caller needs to perform some
  * non-background writeback, this is how many pages it will attempt to write.
@@ -103,6 +102,141 @@ EXPORT_SYMBOL(laptop_mode);
 static void background_writeout(unsigned long _min_pages);
 
 /*
+ * Scale the writeback cache size proportional to the relative writeout speeds.
+ *
+ * We do this by keeping a floating proportion between BDIs, based on page
+ * writeback completions [end_page_writeback()]. Those devices that write out
+ * pages fastest will get the larger share, while the slower will get a smaller
+ * share.
+ *
+ * We use page writeout completions because we are interested in getting rid of
+ * dirty pages. Having them written out is the primary goal.
+ *
+ * We introduce a concept of time, a period over which we measure these events,
+ * because demand can/will vary over time. The length of this period itself is
+ * measured in page writeback completions.
+ *
+ */
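
The floating-proportion idea above can be illustrated outside the kernel. The sketch below is a minimal userspace model, not the kernel's lib/proportions.c implementation: each device keeps an event count that is halved once per "period" (measured in total completion events), so a device's share is roughly its recent completions over everyone's recent completions. The device names and the period length are invented for the example.

/* Toy model of a floating proportion between two BDIs (userspace sketch,
 * not lib/proportions.c). Counts decay by half each period, so recent
 * writeout completions dominate a device's share. */
#include <stdio.h>

#define PERIOD 64                       /* period length, in completion events */

struct toy_bdi {
        const char *name;
        unsigned long events;           /* decayed completion count */
};

static unsigned long total_events;      /* decayed global count */
static unsigned long events_this_period;

static void writeout_inc(struct toy_bdi *bdi, struct toy_bdi *all, int n)
{
        bdi->events++;
        total_events++;
        if (++events_this_period >= PERIOD) {   /* period rolls over */
                int i;
                for (i = 0; i < n; i++)
                        all[i].events /= 2;     /* age old history */
                total_events /= 2;
                events_this_period = 0;
        }
}

int main(void)
{
        struct toy_bdi bdi[2] = { { "fast-disk", 0 }, { "slow-usb", 0 } };
        int i;

        /* fast-disk completes writeback nine times as often as slow-usb */
        for (i = 0; i < 1000; i++)
                writeout_inc(&bdi[i % 10 == 0 ? 1 : 0], bdi, 2);

        for (i = 0; i < 2; i++)
                printf("%s: %lu/%lu of the dirty limit\n", bdi[i].name,
                       bdi[i].events, total_events);
        return 0;
}

Run long enough, fast-disk's numerator settles near nine tenths of the denominator, which is the behaviour bdi_writeout_fraction() below is after.
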
+static struct prop_descriptor vm_completions;
+static struct prop_descriptor vm_dirties;
+
+static unsigned long determine_dirtyable_memory(void);
+
+/*
+ * couple the period to the dirty_ratio:
+ *
+ *   period/2 ~ roundup_pow_of_two(dirty limit)
+ */
+static int calc_period_shift(void)
+{
+        unsigned long dirty_total;
+
+        dirty_total = (vm_dirty_ratio * determine_dirtyable_memory()) / 100;
+        return 2 + ilog2(dirty_total - 1);
+}
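
A quick worked example of that coupling, assuming a machine whose dirtyable memory and vm_dirty_ratio give a dirty limit of about 100,000 pages (an arbitrary figure for illustration): ilog2(99999) is 16, so calc_period_shift() returns 18, the proportion period is 2^18 = 262144 completions, and period/2 = 131072, i.e. the dirty limit rounded up to a power of two. The snippet below just replays that arithmetic in userspace.

/* Replay of calc_period_shift() for a hypothetical dirty limit of
 * 100000 pages; userspace stand-ins for ilog2() and the sysctl values. */
#include <stdio.h>

static int ilog2_ul(unsigned long v)    /* floor(log2(v)), v > 0 */
{
        int r = -1;
        while (v) {
                v >>= 1;
                r++;
        }
        return r;
}

int main(void)
{
        unsigned long dirtyable = 1000000;      /* pages, made up */
        int vm_dirty_ratio = 10;                /* percent, made up */
        unsigned long dirty_total = vm_dirty_ratio * dirtyable / 100;
        int shift = 2 + ilog2_ul(dirty_total - 1);

        printf("dirty_total = %lu pages\n", dirty_total);
        printf("shift = %d, period = %lu, period/2 = %lu\n",
               shift, 1UL << shift, 1UL << (shift - 1));
        return 0;
}
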
+
+/*
+ * update the period when the dirty ratio changes.
+ */
+int dirty_ratio_handler(struct ctl_table *table, int write,
+                struct file *filp, void __user *buffer, size_t *lenp,
+                loff_t *ppos)
+{
+        int old_ratio = vm_dirty_ratio;
+        int ret = proc_dointvec_minmax(table, write, filp, buffer, lenp, ppos);
+        if (ret == 0 && write && vm_dirty_ratio != old_ratio) {
+                int shift = calc_period_shift();
+                prop_change_shift(&vm_completions, shift);
+                prop_change_shift(&vm_dirties, shift);
+        }
+        return ret;
+}
+
+/*
+ * Increment the BDI's writeout completion count and the global writeout
+ * completion count. Called from test_clear_page_writeback().
+ */
+static inline void __bdi_writeout_inc(struct backing_dev_info *bdi)
+{
+        __prop_inc_percpu(&vm_completions, &bdi->completions);
+}
+
+static inline void task_dirty_inc(struct task_struct *tsk)
+{
+        prop_inc_single(&vm_dirties, &tsk->dirties);
+}
+
+/*
+ * Obtain an accurate fraction of the BDI's portion.
+ */
+static void bdi_writeout_fraction(struct backing_dev_info *bdi,
+                long *numerator, long *denominator)
+{
+        if (bdi_cap_writeback_dirty(bdi)) {
+                prop_fraction_percpu(&vm_completions, &bdi->completions,
+                                numerator, denominator);
+        } else {
+                *numerator = 0;
+                *denominator = 1;
+        }
+}
+
+/*
+ * Clip the earned share of dirty pages to that which is actually available.
+ * This avoids exceeding the total dirty_limit when the floating averages
+ * fluctuate too quickly.
+ */
+static void
+clip_bdi_dirty_limit(struct backing_dev_info *bdi, long dirty, long *pbdi_dirty)
+{
+        long avail_dirty;
+
+        avail_dirty = dirty -
+                (global_page_state(NR_FILE_DIRTY) +
+                 global_page_state(NR_WRITEBACK) +
+                 global_page_state(NR_UNSTABLE_NFS));
+
+        if (avail_dirty < 0)
+                avail_dirty = 0;
+
+        avail_dirty += bdi_stat(bdi, BDI_RECLAIMABLE) +
+                bdi_stat(bdi, BDI_WRITEBACK);
+
+        *pbdi_dirty = min(*pbdi_dirty, avail_dirty);
+}
+
+static inline void task_dirties_fraction(struct task_struct *tsk,
+                long *numerator, long *denominator)
+{
+        prop_fraction_single(&vm_dirties, &tsk->dirties,
+                                numerator, denominator);
+}
+
+/*
+ * scale the dirty limit
+ *
+ * task specific dirty limit:
+ *
+ *   dirty -= (dirty/8) * p_{t}
+ */
+void task_dirty_limit(struct task_struct *tsk, long *pdirty)
+{
+        long numerator, denominator;
+        long dirty = *pdirty;
+        u64 inv = dirty >> 3;
+
+        task_dirties_fraction(tsk, &numerator, &denominator);
+        inv *= numerator;
+        do_div(inv, denominator);
+
+        dirty -= inv;
+        if (dirty < *pdirty/2)
+                dirty = *pdirty/2;
+
+        *pdirty = dirty;
+}
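
To make the dirty -= (dirty/8) * p_{t} rule concrete: a task responsible for all of the recent dirtying (p = 1) sees its limit cut by one eighth, an occasional dirtier (p near 0) keeps essentially the full limit, and the floor at *pdirty/2 stops the limit from collapsing. The numbers below are invented for illustration.

/* Worked example of task_dirty_limit()'s scaling, with made-up numbers:
 * a 40000-page limit and tasks owning 0%, 50% and 100% of recent dirtying. */
#include <stdio.h>

static long scaled_limit(long dirty, long numerator, long denominator)
{
        long long inv = dirty >> 3;             /* dirty/8 */
        long orig = dirty;

        inv = inv * numerator / denominator;    /* (dirty/8) * p_task */
        dirty -= (long)inv;
        if (dirty < orig / 2)                   /* never drop below half */
                dirty = orig / 2;
        return dirty;
}

int main(void)
{
        long limit = 40000;     /* hypothetical per-bdi dirty limit, in pages */

        printf("p=0.0 -> %ld pages\n", scaled_limit(limit, 0, 1));
        printf("p=0.5 -> %ld pages\n", scaled_limit(limit, 1, 2));
        printf("p=1.0 -> %ld pages\n", scaled_limit(limit, 1, 1));
        return 0;
}

This prints 40000, 37500 and 35000 pages respectively for the assumed 40000-page limit.
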
+
+/*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
  *
@@ -158,8 +292,8 @@ static unsigned long determine_dirtyable_memory(void)
 }
 
 static void
-get_dirty_limits(long *pbackground, long *pdirty,
-                struct address_space *mapping)
+get_dirty_limits(long *pbackground, long *pdirty, long *pbdi_dirty,
+                 struct backing_dev_info *bdi)
 {
         int background_ratio;          /* Percentages */
         int dirty_ratio;
@@ -193,6 +327,23 @@ get_dirty_limits(long *pbackground, long *pdirty,
         }
         *pbackground = background;
         *pdirty = dirty;
+
+        if (bdi) {
+                u64 bdi_dirty = dirty;
+                long numerator, denominator;
+
+                /*
+                 * Calculate this BDI's share of the dirty ratio.
+                 */
+                bdi_writeout_fraction(bdi, &numerator, &denominator);
+
+                bdi_dirty *= numerator;
+                do_div(bdi_dirty, denominator);
+
+                *pbdi_dirty = bdi_dirty;
+                clip_bdi_dirty_limit(bdi, dirty, pbdi_dirty);
+                task_dirty_limit(current, pbdi_dirty);
+        }
 }
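
Putting the pieces of the hunk above together: the per-BDI threshold is the global dirty limit times the BDI's completion fraction, then clipped to what is still globally available, then shrunk per task. A rough standalone rendition, with invented inputs and the same arithmetic, looks like this:

/* Rough userspace rendition of the bdi branch in get_dirty_limits():
 * global limit -> bdi share -> clip -> per-task scaling. All inputs are
 * invented; the kernel reads them from vmstat, bdi and prop counters. */
#include <stdio.h>

int main(void)
{
        long dirty = 50000;             /* global dirty limit, pages */
        long numerator = 3, denominator = 4;    /* bdi did 75% of writeout */
        long globally_dirty = 42000;    /* dirty + writeback + unstable */
        long bdi_owned = 30000;         /* this bdi's reclaimable + writeback */
        long task_num = 1, task_den = 10;       /* task did 10% of dirtying */

        long long bdi_dirty = (long long)dirty * numerator / denominator;

        /* clip_bdi_dirty_limit(): never promise more than is still available */
        long avail = dirty - globally_dirty;
        if (avail < 0)
                avail = 0;
        avail += bdi_owned;
        if (bdi_dirty > avail)
                bdi_dirty = avail;

        /* task_dirty_limit(): dirty -= (dirty/8) * p_task, floored at half */
        {
                long long inv = (bdi_dirty >> 3) * task_num / task_den;
                long long half = bdi_dirty / 2;
                bdi_dirty -= inv;
                if (bdi_dirty < half)
                        bdi_dirty = half;
        }

        printf("bdi_thresh = %lld pages\n", bdi_dirty);
        return 0;
}
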
 
 /*
@@ -204,9 +355,11 @@ get_dirty_limits(long *pbackground, long *pdirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-        long nr_reclaimable;
+        long bdi_nr_reclaimable;
+        long bdi_nr_writeback;
         long background_thresh;
         long dirty_thresh;
+        long bdi_thresh;
         unsigned long pages_written = 0;
         unsigned long write_chunk = sync_writeback_pages();
 
@@ -221,15 +374,15 @@ static void balance_dirty_pages(struct address_space *mapping)
                         .range_cyclic   = 1,
                 };
 
-                get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
-                nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
-                                global_page_state(NR_UNSTABLE_NFS);
-                if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
-                        dirty_thresh)
+                get_dirty_limits(&background_thresh, &dirty_thresh,
+                                &bdi_thresh, bdi);
+                bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
+                bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
+                if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
                         break;
 
-                if (!dirty_exceeded)
-                        dirty_exceeded = 1;
+                if (!bdi->dirty_exceeded)
+                        bdi->dirty_exceeded = 1;
 
                 /* Note: nr_reclaimable denotes nr_dirty + nr_unstable.
                  * Unstable writes are a feature of certain networked
@@ -237,26 +390,42 @@ static void balance_dirty_pages(struct address_space *mapping)
                  * written to the server's write cache, but has not yet
                  * been flushed to permanent storage.
                  */
-                if (nr_reclaimable) {
+                if (bdi_nr_reclaimable) {
                         writeback_inodes(&wbc);
-                        get_dirty_limits(&background_thresh,
-                                        &dirty_thresh, mapping);
-                        nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
-                                        global_page_state(NR_UNSTABLE_NFS);
-                        if (nr_reclaimable +
-                                global_page_state(NR_WRITEBACK)
-                                        <= dirty_thresh)
-                                                break;
                         pages_written += write_chunk - wbc.nr_to_write;
-                        if (pages_written >= write_chunk)
-                                break;          /* We've done our duty */
+                        get_dirty_limits(&background_thresh, &dirty_thresh,
+                                        &bdi_thresh, bdi);
+                }
+
+                /*
+                 * In order to avoid the stacked BDI deadlock we need
+                 * to ensure we accurately count the 'dirty' pages when
+                 * the threshold is low.
+                 *
+                 * Otherwise it would be possible to get thresh+n pages
+                 * reported dirty, even though there are thresh-m pages
+                 * actually dirty; with m+n sitting in the percpu
+                 * deltas.
+                 */
+                if (bdi_thresh < 2*bdi_stat_error(bdi)) {
+                        bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE);
+                        bdi_nr_writeback = bdi_stat_sum(bdi, BDI_WRITEBACK);
+                } else if (bdi_nr_reclaimable) {
+                        bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE);
+                        bdi_nr_writeback = bdi_stat(bdi, BDI_WRITEBACK);
                 }
+
+                if (bdi_nr_reclaimable + bdi_nr_writeback <= bdi_thresh)
+                        break;
+                if (pages_written >= write_chunk)
+                        break;          /* We've done our duty */
+
                 congestion_wait(WRITE, HZ/10);
         }
 
-        if (nr_reclaimable + global_page_state(NR_WRITEBACK)
-                <= dirty_thresh && dirty_exceeded)
-                dirty_exceeded = 0;
+        if (bdi_nr_reclaimable + bdi_nr_writeback < bdi_thresh &&
+                        bdi->dirty_exceeded)
+                bdi->dirty_exceeded = 0;
 
         if (writeback_in_progress(bdi))
                 return;         /* pdflush is already working this queue */
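
The deadlock-avoidance comment in this hunk hinges on how per-cpu counters behave: the cheap read can be off by up to the number of CPUs times the per-cpu batch size, which is what bdi_stat_error() reports, so when bdi_thresh is smaller than twice that error the code pays for the exact bdi_stat_sum(). A minimal model of that approximate-versus-exact trade-off, with an invented batch size, is sketched below.

/* Toy percpu counter: each CPU batches up to BATCH local updates before
 * folding them into the shared total, so a cheap read of 'total' can be
 * off by as much as NCPUS * BATCH, the analogue of bdi_stat_error(). */
#include <stdio.h>

#define NCPUS 4
#define BATCH 32

struct toy_percpu_counter {
        long total;             /* shared, cheap to read */
        long delta[NCPUS];      /* per-cpu, not yet folded in */
};

static void counter_add(struct toy_percpu_counter *c, int cpu, long n)
{
        c->delta[cpu] += n;
        if (c->delta[cpu] >= BATCH || c->delta[cpu] <= -BATCH) {
                c->total += c->delta[cpu];      /* fold the batch */
                c->delta[cpu] = 0;
        }
}

static long counter_read(struct toy_percpu_counter *c)
{
        return c->total;                        /* fast, approximate */
}

static long counter_sum(struct toy_percpu_counter *c)
{
        long sum = c->total;                    /* slow, exact */
        int cpu;

        for (cpu = 0; cpu < NCPUS; cpu++)
                sum += c->delta[cpu];
        return sum;
}

int main(void)
{
        struct toy_percpu_counter c = { 0 };
        int cpu, i;

        for (cpu = 0; cpu < NCPUS; cpu++)
                for (i = 0; i < 20; i++)        /* 20 < BATCH: stays in deltas */
                        counter_add(&c, cpu, 1);

        printf("approximate read: %ld, exact sum: %ld, max error: %d\n",
               counter_read(&c), counter_sum(&c), NCPUS * BATCH);
        return 0;
}

When the per-BDI threshold is comparable to that worst-case error, only the exact sum can say whether the BDI is really over its limit, hence the bdi_stat_sum() fallback above.
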
@@ -270,7 +439,9 @@ static void balance_dirty_pages(struct address_space *mapping)
          * background_thresh, to keep the amount of dirty memory low.
          */
         if ((laptop_mode && pages_written) ||
-                        (!laptop_mode && (nr_reclaimable > background_thresh)))
+            (!laptop_mode && (global_page_state(NR_FILE_DIRTY)
+                              + global_page_state(NR_UNSTABLE_NFS)
+                              > background_thresh)))
                 pdflush_operation(background_writeout, 0);
 }
 
@@ -306,7 +477,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
         unsigned long *p;
 
         ratelimit = ratelimit_pages;
-        if (dirty_exceeded)
+        if (mapping->backing_dev_info->dirty_exceeded)
                 ratelimit = 8;
 
         /*
@@ -331,18 +502,8 @@ void throttle_vm_writeout(gfp_t gfp_mask)
         long background_thresh;
         long dirty_thresh;
 
-        if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO)) {
-                /*
-                 * The caller might hold locks which can prevent IO completion
-                 * or progress in the filesystem.  So we cannot just sit here
-                 * waiting for IO to complete.
-                 */
-                congestion_wait(WRITE, HZ/10);
-                return;
-        }
-
         for ( ; ; ) {
-                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+                get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
 
                 /*
                  * Boost the allowable dirty threshold a bit for page
@@ -354,6 +515,14 @@ void throttle_vm_writeout(gfp_t gfp_mask)
                     global_page_state(NR_WRITEBACK) <= dirty_thresh)
                         break;
                 congestion_wait(WRITE, HZ/10);
+
+                /*
+                 * The caller might hold locks which can prevent IO completion
+                 * or progress in the filesystem.  So we cannot just sit here
+                 * waiting for IO to complete.
+                 */
+                if ((gfp_mask & (__GFP_FS|__GFP_IO)) != (__GFP_FS|__GFP_IO))
+                        break;
         }
 }
 
@@ -377,11 +546,12 @@ static void background_writeout(unsigned long _min_pages)
                 long background_thresh;
                 long dirty_thresh;
 
-                get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+                get_dirty_limits(&background_thresh, &dirty_thresh, NULL, NULL);
                 if (global_page_state(NR_FILE_DIRTY) +
                         global_page_state(NR_UNSTABLE_NFS) < background_thresh
                                 && min_pages <= 0)
                         break;
+                wbc.more_io = 0;
                 wbc.encountered_congestion = 0;
                 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                 wbc.pages_skipped = 0;
@@ -389,8 +559,9 @@ static void background_writeout(unsigned long _min_pages)
                 min_pages -= MAX_WRITEBACK_PAGES - wbc.nr_to_write;
                 if (wbc.nr_to_write > 0 || wbc.pages_skipped > 0) {
                         /* Wrote less than expected */
-                        congestion_wait(WRITE, HZ/10);
-                        if (!wbc.encountered_congestion)
+                        if (wbc.encountered_congestion || wbc.more_io)
+                                congestion_wait(WRITE, HZ/10);
+                        else
                                 break;
                 }
         }
@@ -455,11 +626,12 @@ static void wb_kupdate(unsigned long arg)
                         global_page_state(NR_UNSTABLE_NFS) +
                         (inodes_stat.nr_inodes - inodes_stat.nr_unused);
         while (nr_to_write > 0) {
+                wbc.more_io = 0;
                 wbc.encountered_congestion = 0;
                 wbc.nr_to_write = MAX_WRITEBACK_PAGES;
                 writeback_inodes(&wbc);
                 if (wbc.nr_to_write > 0) {
-                        if (wbc.encountered_congestion)
+                        if (wbc.encountered_congestion || wbc.more_io)
                                 congestion_wait(WRITE, HZ/10);
                         else
                                 break;  /* All the old data is written */
@@ -580,9 +752,15 @@ static struct notifier_block __cpuinitdata ratelimit_nb = {
  */
 void __init page_writeback_init(void)
 {
+        int shift;
+
         mod_timer(&wb_timer, jiffies + dirty_writeback_interval);
         writeback_set_ratelimit();
         register_cpu_notifier(&ratelimit_nb);
+
+        shift = calc_period_shift();
+        prop_descriptor_init(&vm_completions, shift);
+        prop_descriptor_init(&vm_dirties, shift);
 }
 
 /**
@@ -672,8 +850,10 @@ retry:
 
                 ret = (*writepage)(page, wbc, data);
 
-                if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE))
+                if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
                         unlock_page(page);
+                        ret = 0;
+                }
                 if (ret || (--(wbc->nr_to_write) <= 0))
                         done = 1;
                 if (wbc->nonblocking && bdi_write_congested(bdi)) {
@@ -827,6 +1007,8 @@ int __set_page_dirty_nobuffers(struct page *page)
                         WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
                         if (mapping_cap_account_dirty(mapping)) {
                                 __inc_zone_page_state(page, NR_FILE_DIRTY);
+                                __inc_bdi_stat(mapping->backing_dev_info,
+                                                BDI_RECLAIMABLE);
                                 task_io_account_write(PAGE_CACHE_SIZE);
                         }
                         radix_tree_tag_set(&mapping->page_tree,
@@ -859,7 +1041,7 @@ EXPORT_SYMBOL(redirty_page_for_writepage);
  * If the mapping doesn't provide a set_page_dirty a_op, then
  * just fall through and assume that it wants buffer_heads.
  */
-int fastcall set_page_dirty(struct page *page)
+static int __set_page_dirty(struct page *page)
 {
         struct address_space *mapping = page_mapping(page);
 
@@ -877,6 +1059,14 @@ int fastcall set_page_dirty(struct page *page)
         }
         return 0;
 }
+
+int fastcall set_page_dirty(struct page *page)
+{
+        int ret = __set_page_dirty(page);
+        if (ret)
+                task_dirty_inc(current);
+        return ret;
+}
 EXPORT_SYMBOL(set_page_dirty);
 
 /*
@@ -961,6 +1151,8 @@ int clear_page_dirty_for_io(struct page *page)
                  */
                 if (TestClearPageDirty(page)) {
                         dec_zone_page_state(page, NR_FILE_DIRTY);
+                        dec_bdi_stat(mapping->backing_dev_info,
+                                        BDI_RECLAIMABLE);
                         return 1;
                 }
                 return 0;
@@ -975,14 +1167,20 @@ int test_clear_page_writeback(struct page *page)
         int ret;
 
         if (mapping) {
+                struct backing_dev_info *bdi = mapping->backing_dev_info;
                 unsigned long flags;
 
                 write_lock_irqsave(&mapping->tree_lock, flags);
                 ret = TestClearPageWriteback(page);
-                if (ret)
+                if (ret) {
                         radix_tree_tag_clear(&mapping->page_tree,
                                                 page_index(page),
                                                 PAGECACHE_TAG_WRITEBACK);
+                        if (bdi_cap_writeback_dirty(bdi)) {
+                                __dec_bdi_stat(bdi, BDI_WRITEBACK);
+                                __bdi_writeout_inc(bdi);
+                        }
+                }
                 write_unlock_irqrestore(&mapping->tree_lock, flags);
         } else {
                 ret = TestClearPageWriteback(page);
@@ -998,14 +1196,18 @@ int test_set_page_writeback(struct page *page)
         int ret;
 
         if (mapping) {
+                struct backing_dev_info *bdi = mapping->backing_dev_info;
                 unsigned long flags;
 
                 write_lock_irqsave(&mapping->tree_lock, flags);
                 ret = TestSetPageWriteback(page);
-                if (!ret)
+                if (!ret) {
                         radix_tree_tag_set(&mapping->page_tree,
                                                 page_index(page),
                                                 PAGECACHE_TAG_WRITEBACK);
+                        if (bdi_cap_writeback_dirty(bdi))
+                                __inc_bdi_stat(bdi, BDI_WRITEBACK);
+                }
                 if (!PageDirty(page))
                         radix_tree_tag_clear(&mapping->page_tree,
                                 page_index(page),