diff options
author | Wu Fengguang <fengguang.wu@intel.com> | 2011-04-14 09:52:37 -0400 |
---|---|---|
committer | Wu Fengguang <fengguang.wu@intel.com> | 2011-12-18 01:20:22 -0500 |
commit | d3bc1fef9389e409a772ea174a5e41a6f93d9b7b (patch) | |
tree | d1e47354263b7c930a7ec4428909693d10a10c50 /mm | |
parent | 54848d73f9f254631303d6eab9b976855988b266 (diff) |
writeback: fix dirtied pages accounting on sub-page writes
When dd writes in 512-byte chunks, generic_perform_write() calls
balance_dirty_pages_ratelimited() 8 times for the same page, even though
the page is only dirtied once.
Fix it by accounting tsk->nr_dirtied and bdp_ratelimits at page dirty time.
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page-writeback.c | 13 |
1 file changed, 5 insertions, 8 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c index 619c445fc03c..5d1ef5d8613a 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c | |||
@@ -1258,8 +1258,6 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, | |||
1258 | if (bdi->dirty_exceeded) | 1258 | if (bdi->dirty_exceeded) |
1259 | ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); | 1259 | ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); |
1260 | 1260 | ||
1261 | current->nr_dirtied += nr_pages_dirtied; | ||
1262 | |||
1263 | preempt_disable(); | 1261 | preempt_disable(); |
1264 | /* | 1262 | /* |
1265 | * This prevents one CPU to accumulate too many dirtied pages without | 1263 | * This prevents one CPU to accumulate too many dirtied pages without |
@@ -1270,12 +1268,9 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, | |||
1270 | p = &__get_cpu_var(bdp_ratelimits); | 1268 | p = &__get_cpu_var(bdp_ratelimits); |
1271 | if (unlikely(current->nr_dirtied >= ratelimit)) | 1269 | if (unlikely(current->nr_dirtied >= ratelimit)) |
1272 | *p = 0; | 1270 | *p = 0; |
1273 | else { | 1271 | else if (unlikely(*p >= ratelimit_pages)) { |
1274 | *p += nr_pages_dirtied; | 1272 | *p = 0; |
1275 | if (unlikely(*p >= ratelimit_pages)) { | 1273 | ratelimit = 0; |
1276 | *p = 0; | ||
1277 | ratelimit = 0; | ||
1278 | } | ||
1279 | } | 1274 | } |
1280 | /* | 1275 | /* |
1281 | * Pick up the dirtied pages by the exited tasks. This avoids lots of | 1276 | * Pick up the dirtied pages by the exited tasks. This avoids lots of |
@@ -1768,6 +1763,8 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) | |||
1768 | __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); | 1763 | __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); |
1769 | __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); | 1764 | __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); |
1770 | task_io_account_write(PAGE_CACHE_SIZE); | 1765 | task_io_account_write(PAGE_CACHE_SIZE); |
1766 | current->nr_dirtied++; | ||
1767 | this_cpu_inc(bdp_ratelimits); | ||
1771 | } | 1768 | } |
1772 | } | 1769 | } |
1773 | EXPORT_SYMBOL(account_page_dirtied); | 1770 | EXPORT_SYMBOL(account_page_dirtied); |