author     Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 20:15:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-10-26 20:15:20 -0400
commit     31453a9764f7e2a72a6e2c502ace586e2663a68c
tree       5d4db63de5b4b85d1ffdab4e95a75175a784a10a  /mm/page-writeback.c
parent     f9ba5375a8aae4aeea6be15df77e24707a429812
parent     93ed0e2d07b25aff4db1d61bfbcd1e82074c0ad5
Merge branch 'akpm-incoming-1'
* akpm-incoming-1: (176 commits)
  scripts/checkpatch.pl: add check for declaration of pci_device_id
  scripts/checkpatch.pl: add warnings for static char that could be static const char
  checkpatch: version 0.31
  checkpatch: statement/block context analyser should look at sanitised lines
  checkpatch: handle EXPORT_SYMBOL for DEVICE_ATTR and similar
  checkpatch: clean up structure definition macro handline
  checkpatch: update copyright dates
  checkpatch: Add additional attribute #defines
  checkpatch: check for incorrect permissions
  checkpatch: ensure kconfig help checks only apply when we are adding help
  checkpatch: simplify and consolidate "missing space after" checks
  checkpatch: add check for space after struct, union, and enum
  checkpatch: returning errno typically should be negative
  checkpatch: handle casts better fixing false categorisation of : as binary
  checkpatch: ensure we do not collapse bracketed sections into constants
  checkpatch: suggest cleanpatch and cleanfile when appropriate
  checkpatch: types may sit on a line on their own
  checkpatch: fix regressions in "fix handling of leading spaces"
  div64_u64(): improve precision on 32bit platforms
  lib/parser: cleanup match_number()
  ...
Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--  mm/page-writeback.c  31
1 file changed, 19 insertions(+), 12 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index e3bccac1f025..b840afa89761 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -415,14 +415,8 @@ void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 
 	if (vm_dirty_bytes)
 		dirty = DIV_ROUND_UP(vm_dirty_bytes, PAGE_SIZE);
-	else {
-		int dirty_ratio;
-
-		dirty_ratio = vm_dirty_ratio;
-		if (dirty_ratio < 5)
-			dirty_ratio = 5;
-		dirty = (dirty_ratio * available_memory) / 100;
-	}
+	else
+		dirty = (vm_dirty_ratio * available_memory) / 100;
 
 	if (dirty_background_bytes)
 		background = DIV_ROUND_UP(dirty_background_bytes, PAGE_SIZE);
@@ -510,7 +504,7 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * catch-up. This avoids (excessively) small writeouts
 		 * when the bdi limits are ramping up.
 		 */
-		if (nr_reclaimable + nr_writeback <
+		if (nr_reclaimable + nr_writeback <=
 				(background_thresh + dirty_thresh) / 2)
 			break;
 
@@ -542,8 +536,8 @@ static void balance_dirty_pages(struct address_space *mapping,
 		 * the last resort safeguard.
 		 */
 		dirty_exceeded =
-			(bdi_nr_reclaimable + bdi_nr_writeback >= bdi_thresh)
-			|| (nr_reclaimable + nr_writeback >= dirty_thresh);
+			(bdi_nr_reclaimable + bdi_nr_writeback > bdi_thresh)
+			|| (nr_reclaimable + nr_writeback > dirty_thresh);
 
 		if (!dirty_exceeded)
 			break;
@@ -1121,6 +1115,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 {
 	if (mapping_cap_account_dirty(mapping)) {
 		__inc_zone_page_state(page, NR_FILE_DIRTY);
+		__inc_zone_page_state(page, NR_DIRTIED);
 		__inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE);
 		task_dirty_inc(current);
 		task_io_account_write(PAGE_CACHE_SIZE);
@@ -1129,6 +1124,18 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
 EXPORT_SYMBOL(account_page_dirtied);
 
 /*
+ * Helper function for set_page_writeback family.
+ * NOTE: Unlike account_page_dirtied this does not rely on being atomic
+ * wrt interrupts.
+ */
+void account_page_writeback(struct page *page)
+{
+	inc_zone_page_state(page, NR_WRITEBACK);
+	inc_zone_page_state(page, NR_WRITTEN);
+}
+EXPORT_SYMBOL(account_page_writeback);
+
+/*
  * For address_spaces which do not use buffers. Just tag the page as dirty in
  * its radix tree.
  *
@@ -1366,7 +1373,7 @@ int test_set_page_writeback(struct page *page)
 		ret = TestSetPageWriteback(page);
 	}
 	if (!ret)
-		inc_zone_page_state(page, NR_WRITEBACK);
+		account_page_writeback(page);
 	return ret;
 
 }
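
Illustration (not part of the diff above): the newly exported account_page_writeback() is meant to be called from set_page_writeback-style code on the moment a page actually transitions into writeback, exactly as the test_set_page_writeback() hunk does. The sketch below shows that calling pattern in isolation; example_start_writeback() is a hypothetical name, and the mapping->tree_lock handling and radix-tree tagging performed by the real function are deliberately omitted.

/*
 * Minimal, hypothetical caller sketch (assumes a kernel of this era,
 * where the helper was just introduced; header locations are assumed).
 */
#include <linux/mm.h>           /* account_page_writeback() declaration assumed here */
#include <linux/page-flags.h>   /* TestSetPageWriteback() */

static int example_start_writeback(struct page *page)
{
	/* Atomically set PG_writeback and return its previous state. */
	int was_writeback = TestSetPageWriteback(page);

	/*
	 * Account only on the 0 -> 1 transition: NR_WRITEBACK counts pages
	 * currently under writeback, while NR_WRITTEN is bumped once per
	 * page entering writeback.
	 */
	if (!was_writeback)
		account_page_writeback(page);

	return was_writeback;
}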