Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--	mm/page-writeback.c	100
1 file changed, 44 insertions, 56 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 75d7f48b79bb..e630188ccc40 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -99,22 +99,6 @@ EXPORT_SYMBOL(laptop_mode);
 
 static void background_writeout(unsigned long _min_pages);
 
-struct writeback_state
-{
-	unsigned long nr_dirty;
-	unsigned long nr_unstable;
-	unsigned long nr_mapped;
-	unsigned long nr_writeback;
-};
-
-static void get_writeback_state(struct writeback_state *wbs)
-{
-	wbs->nr_dirty = read_page_state(nr_dirty);
-	wbs->nr_unstable = read_page_state(nr_unstable);
-	wbs->nr_mapped = read_page_state(nr_mapped);
-	wbs->nr_writeback = read_page_state(nr_writeback);
-}
-
 /*
  * Work out the current dirty-memory clamping and background writeout
  * thresholds.
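The removed get_writeback_state() took a one-shot snapshot of four global counters; after this change every call site reads exactly the counters it needs through global_page_state(). A self-contained toy model of the zoned-counter idea this series introduces (the names mirror the kernel's, but the code is an illustration, not the kernel implementation):

#include <stdio.h>

/* Illustrative stand-ins for the kernel's zoned VM counter items. */
enum zone_stat_item {
	NR_FILE_DIRTY,
	NR_WRITEBACK,
	NR_UNSTABLE_NFS,
	NR_VM_ZONE_STAT_ITEMS
};

struct zone {
	long vm_stat[NR_VM_ZONE_STAT_ITEMS];	/* per-zone counters */
};

static struct zone zones[2];			/* e.g. ZONE_DMA + ZONE_NORMAL */

/* Model of global_page_state(): sum the per-zone counters.  (The real
 * kernel also keeps a pre-folded global array so the read stays cheap;
 * summing the zones gives the same answer and is easier to follow.) */
static long global_page_state(enum zone_stat_item item)
{
	long total = 0;
	int i;

	for (i = 0; i < 2; i++)
		total += zones[i].vm_stat[item];
	return total;
}

int main(void)
{
	zones[0].vm_stat[NR_FILE_DIRTY] = 100;
	zones[1].vm_stat[NR_FILE_DIRTY] = 250;
	printf("dirty pages: %ld\n", global_page_state(NR_FILE_DIRTY));	/* 350 */
	return 0;
}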
@@ -133,8 +117,8 @@ static void get_writeback_state(struct writeback_state *wbs)
  * clamping level.
  */
 static void
-get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
+get_dirty_limits(long *pbackground, long *pdirty,
 		struct address_space *mapping)
 {
 	int background_ratio;		/* Percentages */
 	int dirty_ratio;
@@ -144,8 +128,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
 	unsigned long available_memory = total_pages;
 	struct task_struct *tsk;
 
-	get_writeback_state(wbs);
-
 #ifdef CONFIG_HIGHMEM
 	/*
 	 * If this mapping can only allocate from low memory,
@@ -156,7 +138,9 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
 #endif
 
 
-	unmapped_ratio = 100 - (wbs->nr_mapped * 100) / total_pages;
+	unmapped_ratio = 100 - ((global_page_state(NR_FILE_MAPPED) +
+				global_page_state(NR_ANON_PAGES)) * 100) /
+					total_pages;
 
 	dirty_ratio = vm_dirty_ratio;
 	if (dirty_ratio > unmapped_ratio / 2)
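To make the clamp above concrete, here is the same integer arithmetic as a standalone program with made-up counter values: with 40% of memory mapped, unmapped_ratio comes out at 60, so a vm_dirty_ratio of 40 is clamped down to 30.

#include <stdio.h>

int main(void)
{
	long total_pages = 100000;	/* sample: total allocatable pages */
	long mapped = 40000;		/* sample: NR_FILE_MAPPED + NR_ANON_PAGES */
	int vm_dirty_ratio = 40;	/* sample sysctl setting */

	int unmapped_ratio = 100 - (mapped * 100) / total_pages;	/* 60 */
	int dirty_ratio = vm_dirty_ratio;

	if (dirty_ratio > unmapped_ratio / 2)
		dirty_ratio = unmapped_ratio / 2;			/* clamped to 30 */

	printf("unmapped_ratio=%d dirty_ratio=%d\n", unmapped_ratio, dirty_ratio);
	return 0;
}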
@@ -189,7 +173,6 @@ get_dirty_limits(struct writeback_state *wbs, long *pbackground, long *pdirty,
  */
 static void balance_dirty_pages(struct address_space *mapping)
 {
-	struct writeback_state wbs;
 	long nr_reclaimable;
 	long background_thresh;
 	long dirty_thresh;
@@ -204,13 +187,15 @@ static void balance_dirty_pages(struct address_space *mapping)
 			.sync_mode	= WB_SYNC_NONE,
 			.older_than_this = NULL,
 			.nr_to_write	= write_chunk,
+			.range_cyclic	= 1,
 		};
 
-		get_dirty_limits(&wbs, &background_thresh,
-					&dirty_thresh, mapping);
-		nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-		if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
-			break;
+		get_dirty_limits(&background_thresh, &dirty_thresh, mapping);
+		nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+					global_page_state(NR_UNSTABLE_NFS);
+		if (nr_reclaimable + global_page_state(NR_WRITEBACK) <=
+			dirty_thresh)
+				break;
 
 		if (!dirty_exceeded)
 			dirty_exceeded = 1;
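The .range_cyclic = 1 initializers added in this and later hunks tell writeback to sweep each file cyclically, resuming at the index where the previous pass stopped (the kernel keeps this cursor in mapping->writeback_index) rather than always restarting at offset 0. A toy model of that bookkeeping, not kernel code:

#include <stdio.h>

/* Self-contained model of the cyclic-sweep cursor behind .range_cyclic. */
struct file_model {
	long nr_pages;
	long writeback_index;	/* where the next cyclic sweep starts */
};

static void sweep(struct file_model *f, long nr_to_write)
{
	long written;

	for (written = 0; written < nr_to_write; written++)
		/* "write" the page at the cursor, then advance cyclically */
		f->writeback_index = (f->writeback_index + 1) % f->nr_pages;
}

int main(void)
{
	struct file_model f = { .nr_pages = 10, .writeback_index = 0 };

	sweep(&f, 4);
	printf("next sweep resumes at page %ld\n", f.writeback_index);	/* 4 */
	sweep(&f, 4);
	printf("then at page %ld\n", f.writeback_index);		/* 8 */
	return 0;
}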
@@ -223,11 +208,14 @@ static void balance_dirty_pages(struct address_space *mapping)
 		 */
 		if (nr_reclaimable) {
 			writeback_inodes(&wbc);
-			get_dirty_limits(&wbs, &background_thresh,
+			get_dirty_limits(&background_thresh,
 					&dirty_thresh, mapping);
-			nr_reclaimable = wbs.nr_dirty + wbs.nr_unstable;
-			if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh)
-				break;
+			nr_reclaimable = global_page_state(NR_FILE_DIRTY) +
+					global_page_state(NR_UNSTABLE_NFS);
+			if (nr_reclaimable +
+				global_page_state(NR_WRITEBACK)
+					<= dirty_thresh)
+						break;
 			pages_written += write_chunk - wbc.nr_to_write;
 			if (pages_written >= write_chunk)
 				break;		/* We've done our duty */
@@ -235,8 +223,9 @@ static void balance_dirty_pages(struct address_space *mapping)
 		blk_congestion_wait(WRITE, HZ/10);
 	}
 
-	if (nr_reclaimable + wbs.nr_writeback <= dirty_thresh && dirty_exceeded)
-		dirty_exceeded = 0;
+	if (nr_reclaimable + global_page_state(NR_WRITEBACK)
+		<= dirty_thresh && dirty_exceeded)
+			dirty_exceeded = 0;
 
 	if (writeback_in_progress(bdi))
 		return;		/* pdflush is already working this queue */
@@ -298,12 +287,11 @@ EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
 
 void throttle_vm_writeout(void)
 {
-	struct writeback_state wbs;
 	long background_thresh;
 	long dirty_thresh;
 
 	for ( ; ; ) {
-		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
+		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
 
 		/*
 		 * Boost the allowable dirty threshold a bit for page
@@ -311,8 +299,9 @@ void throttle_vm_writeout(void)
 		 */
 		dirty_thresh += dirty_thresh / 10;	/* wheeee... */
 
-		if (wbs.nr_unstable + wbs.nr_writeback <= dirty_thresh)
-			break;
+		if (global_page_state(NR_UNSTABLE_NFS) +
+			global_page_state(NR_WRITEBACK) <= dirty_thresh)
+				break;
 		blk_congestion_wait(WRITE, HZ/10);
 	}
 }
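The boost is plain integer arithmetic: with a computed dirty_thresh of 1000 pages, page allocators are throttled only once unstable plus writeback pages exceed 1100. A two-line check:

#include <stdio.h>

int main(void)
{
	long dirty_thresh = 1000;		/* sample value from get_dirty_limits() */

	dirty_thresh += dirty_thresh / 10;	/* the "wheeee..." boost */
	printf("%ld\n", dirty_thresh);		/* 1100 */
	return 0;
}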
@@ -331,15 +320,16 @@ static void background_writeout(unsigned long _min_pages)
 		.older_than_this = NULL,
 		.nr_to_write	= 0,
 		.nonblocking	= 1,
+		.range_cyclic	= 1,
 	};
 
 	for ( ; ; ) {
-		struct writeback_state wbs;
 		long background_thresh;
 		long dirty_thresh;
 
-		get_dirty_limits(&wbs, &background_thresh, &dirty_thresh, NULL);
-		if (wbs.nr_dirty + wbs.nr_unstable < background_thresh
+		get_dirty_limits(&background_thresh, &dirty_thresh, NULL);
+		if (global_page_state(NR_FILE_DIRTY) +
+			global_page_state(NR_UNSTABLE_NFS) < background_thresh
 				&& min_pages <= 0)
 			break;
 		wbc.encountered_congestion = 0;
@@ -363,12 +353,9 @@ static void background_writeout(unsigned long _min_pages)
  */
 int wakeup_pdflush(long nr_pages)
 {
-	if (nr_pages == 0) {
-		struct writeback_state wbs;
-
-		get_writeback_state(&wbs);
-		nr_pages = wbs.nr_dirty + wbs.nr_unstable;
-	}
+	if (nr_pages == 0)
+		nr_pages = global_page_state(NR_FILE_DIRTY) +
+				global_page_state(NR_UNSTABLE_NFS);
 	return pdflush_operation(background_writeout, nr_pages);
 }
 
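The simplified wakeup_pdflush() keeps its contract: a zero argument means "write back everything that is currently dirty or unstable", now computed from the new counters. Callers of this era rely on that; the reclaim path, for instance, does roughly the following (paraphrased from mm/vmscan.c of this kernel generation, so treat the exact call site as approximate):

	/* In laptop mode ask for a full flush so the disk can spin down
	 * again soon; otherwise flush about as much as reclaim scanned. */
	wakeup_pdflush(laptop_mode ? 0 : total_scanned);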
@@ -399,7 +386,6 @@ static void wb_kupdate(unsigned long arg)
 	unsigned long start_jif;
 	unsigned long next_jif;
 	long nr_to_write;
-	struct writeback_state wbs;
 	struct writeback_control wbc = {
 		.bdi		= NULL,
 		.sync_mode	= WB_SYNC_NONE,
@@ -407,15 +393,16 @@ static void wb_kupdate(unsigned long arg)
 		.nr_to_write	= 0,
 		.nonblocking	= 1,
 		.for_kupdate	= 1,
+		.range_cyclic	= 1,
 	};
 
 	sync_supers();
 
-	get_writeback_state(&wbs);
 	oldest_jif = jiffies - dirty_expire_interval;
 	start_jif = jiffies;
 	next_jif = start_jif + dirty_writeback_interval;
-	nr_to_write = wbs.nr_dirty + wbs.nr_unstable +
+	nr_to_write = global_page_state(NR_FILE_DIRTY) +
+			global_page_state(NR_UNSTABLE_NFS) +
 			(inodes_stat.nr_inodes - inodes_stat.nr_unused);
 	while (nr_to_write > 0) {
 		wbc.encountered_congestion = 0;
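The kupdate pass sizes its work up front: everything currently dirty or unstable, plus one page per in-use inode to leave headroom for inode writeback. Worked through with sample numbers:

#include <stdio.h>

int main(void)
{
	/* Sample counter values; the expression mirrors wb_kupdate() above. */
	long nr_file_dirty = 2000, nr_unstable_nfs = 50;
	long nr_inodes = 10300, nr_unused = 300;

	long nr_to_write = nr_file_dirty + nr_unstable_nfs +
				(nr_inodes - nr_unused);
	printf("nr_to_write = %ld\n", nr_to_write);	/* 12050 */
	return 0;
}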
@@ -513,14 +500,14 @@ static void set_ratelimit(void)
 		ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE;
 }
 
-static int
+static int __cpuinit
 ratelimit_handler(struct notifier_block *self, unsigned long u, void *v)
 {
 	set_ratelimit();
 	return 0;
 }
 
-static struct notifier_block ratelimit_nb = {
+static struct notifier_block __cpuinitdata ratelimit_nb = {
 	.notifier_call	= ratelimit_handler,
 	.next		= NULL,
 };
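__cpuinit and __cpuinitdata mark the handler and its notifier block as discardable when CPU hotplug support is not compiled in: the callback exists only to rescale ratelimit_pages when CPUs come and go. It is wired up once at boot; paraphrasing the registration site in page_writeback_init() (details approximate):

	set_ratelimit();			/* initial value for the boot-time CPU count */
	register_cpu_notifier(&ratelimit_nb);	/* recompute on hotplug events */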
@@ -637,7 +624,8 @@ int __set_page_dirty_nobuffers(struct page *page)
 		if (mapping2) { /* Race with truncate? */
 			BUG_ON(mapping2 != mapping);
 			if (mapping_cap_account_dirty(mapping))
-				inc_page_state(nr_dirty);
+				__inc_zone_page_state(page,
+							NR_FILE_DIRTY);
 			radix_tree_tag_set(&mapping->page_tree,
 				page_index(page), PAGECACHE_TAG_DIRTY);
 		}
@@ -724,9 +712,9 @@ int test_clear_page_dirty(struct page *page)
 			radix_tree_tag_clear(&mapping->page_tree,
 						page_index(page),
 						PAGECACHE_TAG_DIRTY);
-			write_unlock_irqrestore(&mapping->tree_lock, flags);
 			if (mapping_cap_account_dirty(mapping))
-				dec_page_state(nr_dirty);
+				__dec_zone_page_state(page, NR_FILE_DIRTY);
+			write_unlock_irqrestore(&mapping->tree_lock, flags);
 			return 1;
 		}
 		write_unlock_irqrestore(&mapping->tree_lock, flags);
@@ -757,7 +745,7 @@ int clear_page_dirty_for_io(struct page *page)
 	if (mapping) {
 		if (TestClearPageDirty(page)) {
 			if (mapping_cap_account_dirty(mapping))
-				dec_page_state(nr_dirty);
+				dec_zone_page_state(page, NR_FILE_DIRTY);
 			return 1;
 		}
 		return 0;
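Note the two accounting flavours in the last two hunks: test_clear_page_dirty() moves the decrement inside the tree_lock critical section so it can use the __-prefixed variant, which assumes the caller has already disabled interrupts, while clear_page_dirty_for_io() runs without that lock and therefore uses the self-protecting form. Schematically (a paraphrase of the convention, not kernel source):

	/* Under write_lock_irqsave(&mapping->tree_lock, flags):
	 * interrupts are off, so the cheap non-irq-safe variant is legal. */
	__dec_zone_page_state(page, NR_FILE_DIRTY);

	/* No spinlock held, arbitrary context: use the variant that
	 * disables interrupts itself. */
	dec_zone_page_state(page, NR_FILE_DIRTY);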