-rw-r--r--   fs/btrfs/disk-io.c           8
-rw-r--r--   fs/btrfs/file.c              3
-rw-r--r--   fs/btrfs/ioctl.c             2
-rw-r--r--   fs/ocfs2/file.c              5
-rw-r--r--   fs/splice.c                  5
-rw-r--r--   include/linux/writeback.h    9
-rw-r--r--   mm/page-writeback.c         11
7 files changed, 14 insertions, 29 deletions
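
Taken together, the hunks below drop the explicit page-count argument: every in-tree caller of balance_dirty_pages_ratelimited_nr() switches to balance_dirty_pages_ratelimited(), and the throttling decision is driven by the per-task counter current->nr_dirtied instead. A minimal before/after sketch of the call-site conversion (identifier names borrowed from the fs/btrfs/file.c hunk; not part of the patch itself):

	/* before */
	balance_dirty_pages_ratelimited_nr(inode->i_mapping, dirty_pages);

	/* after */
	balance_dirty_pages_ratelimited(inode->i_mapping);
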
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7cda51995c1e..22a0439e5a86 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3416,8 +3416,8 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
 	num_dirty = root->fs_info->dirty_metadata_bytes;
 
 	if (num_dirty > thresh) {
-		balance_dirty_pages_ratelimited_nr(
-				root->fs_info->btree_inode->i_mapping, 1);
+		balance_dirty_pages_ratelimited(
+				root->fs_info->btree_inode->i_mapping);
 	}
 	return;
 }
@@ -3437,8 +3437,8 @@ void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr)
 	num_dirty = root->fs_info->dirty_metadata_bytes;
 
 	if (num_dirty > thresh) {
-		balance_dirty_pages_ratelimited_nr(
-				root->fs_info->btree_inode->i_mapping, 1);
+		balance_dirty_pages_ratelimited(
+				root->fs_info->btree_inode->i_mapping);
 	}
 	return;
 }
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 9ab1bed88116..a8ee75cb96ee 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1346,8 +1346,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 
 		cond_resched();
 
-		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
-						   dirty_pages);
+		balance_dirty_pages_ratelimited(inode->i_mapping);
 		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
 			btrfs_btree_balance_dirty(root, 1);
 
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 8fcf9a59c28d..5b3429ab8ec1 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1225,7 +1225,7 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
 		}
 
 		defrag_count += ret;
-		balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret);
+		balance_dirty_pages_ratelimited(inode->i_mapping);
 		mutex_unlock(&inode->i_mutex);
 
 		if (newer_than) {
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 5a4ee77cec51..dda089804942 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2513,18 +2513,15 @@ static ssize_t ocfs2_file_splice_write(struct pipe_inode_info *pipe,
 	ret = sd.num_spliced;
 
 	if (ret > 0) {
-		unsigned long nr_pages;
 		int err;
 
-		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
 		err = generic_write_sync(out, *ppos, ret);
 		if (err)
 			ret = err;
 		else
 			*ppos += ret;
 
-		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
+		balance_dirty_pages_ratelimited(mapping);
 	}
 
 	return ret;
diff --git a/fs/splice.c b/fs/splice.c
index 13e5b4776e7a..8890604e3fcd 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1024,17 +1024,14 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
 	ret = sd.num_spliced;
 
 	if (ret > 0) {
-		unsigned long nr_pages;
 		int err;
 
-		nr_pages = (ret + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
-
 		err = generic_write_sync(out, *ppos, ret);
 		if (err)
 			ret = err;
 		else
 			*ppos += ret;
-		balance_dirty_pages_ratelimited_nr(mapping, nr_pages);
+		balance_dirty_pages_ratelimited(mapping);
 	}
 	sb_end_write(inode->i_sb);
 
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 50c3e8fa06a8..b82a83aba311 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -161,14 +161,7 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 			    unsigned long start_time);
 
 void page_writeback_init(void);
-void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
-					unsigned long nr_pages_dirtied);
-
-static inline void
-balance_dirty_pages_ratelimited(struct address_space *mapping)
-{
-	balance_dirty_pages_ratelimited_nr(mapping, 1);
-}
+void balance_dirty_pages_ratelimited(struct address_space *mapping);
 
 typedef int (*writepage_t)(struct page *page, struct writeback_control *wbc,
 				void *data);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 830893b2b3c7..6f4271224493 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1069,7 +1069,7 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi,
 }
 
 /*
- * After a task dirtied this many pages, balance_dirty_pages_ratelimited_nr()
+ * After a task dirtied this many pages, balance_dirty_pages_ratelimited()
  * will look to see if it needs to start dirty throttling.
  *
  * If dirty_poll_interval is too low, big NUMA machines will call the expensive
@@ -1436,9 +1436,8 @@ static DEFINE_PER_CPU(int, bdp_ratelimits);
 DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
 
 /**
- * balance_dirty_pages_ratelimited_nr - balance dirty memory state
+ * balance_dirty_pages_ratelimited - balance dirty memory state
  * @mapping: address_space which was dirtied
- * @nr_pages_dirtied: number of pages which the caller has just dirtied
  *
  * Processes which are dirtying memory should call in here once for each page
  * which was newly dirtied. The function will periodically check the system's
@@ -1449,8 +1448,7 @@ DEFINE_PER_CPU(int, dirty_throttle_leaks) = 0;
  * limit we decrease the ratelimiting by a lot, to prevent individual processes
  * from overshooting the limit by (ratelimit_pages) each.
  */
-void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
-					unsigned long nr_pages_dirtied)
+void balance_dirty_pages_ratelimited(struct address_space *mapping)
 {
 	struct backing_dev_info *bdi = mapping->backing_dev_info;
 	int ratelimit;
@@ -1484,6 +1482,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	 */
 	p = &__get_cpu_var(dirty_throttle_leaks);
 	if (*p > 0 && current->nr_dirtied < ratelimit) {
+		unsigned long nr_pages_dirtied;
 		nr_pages_dirtied = min(*p, ratelimit - current->nr_dirtied);
 		*p -= nr_pages_dirtied;
 		current->nr_dirtied += nr_pages_dirtied;
@@ -1493,7 +1492,7 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping,
 	if (unlikely(current->nr_dirtied >= ratelimit))
 		balance_dirty_pages(mapping, current->nr_dirtied);
 }
-EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr);
+EXPORT_SYMBOL(balance_dirty_pages_ratelimited);
 
 void throttle_vm_writeout(gfp_t gfp_mask)
 {
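
For context, a minimal caller-side sketch of how the single-argument API is used after this change. The loop and helper name are illustrative only (not from this patch); it assumes the usual arrangement where dirtying a page bumps current->nr_dirtied, which balance_dirty_pages_ratelimited() then compares against its ratelimit as shown in the hunks above:

/*
 * Illustrative sketch only -- not part of this patch. A write path that
 * dirties pages one at a time and calls the ratelimited helper once per
 * page, as the kernel-doc above asks for; the dirtied-page count lives in
 * current->nr_dirtied rather than being passed in by the caller.
 */
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

static void example_dirty_pages(struct address_space *mapping,
				struct page **pages, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++) {
		set_page_dirty(pages[i]);
		/* No nr_pages_dirtied argument any more. */
		balance_dirty_pages_ratelimited(mapping);
	}
}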