author | Wu Fengguang <fengguang.wu@intel.com> | 2011-12-16 12:32:57 -0500
---|---|---
committer | Chris Mason <chris.mason@oracle.com> | 2011-12-16 12:32:57 -0500
commit | 142349f541d0bb6bc3e0d4563268105aada42b0b |
tree | c86e13b5cf8b87b3b4f3c5218442e221ce17f67b /fs/btrfs/file.c |
parent | dc47ce90c3a822cd7c9e9339fe4d5f61dcb26b50 |
btrfs: lower the dirty balance poll interval
Tests show that the original large intervals can easily push the system past the
dirty limit under 100 concurrent dd's. So adapt the interval to be at most the
distance to the next check point selected by the dirty throttling algorithm.
Signed-off-by: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Chris Mason <chris.mason@oracle.com>
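For context, the patch bounds the number of pages written per iteration by the distance to the task's next dirty-throttling check point (current->nr_dirtied_pause - current->nr_dirtied), with a floor of 8 pages so batching stays worthwhile. Below is a minimal userspace sketch of just that clamping logic; the task_state struct, clamp_batch helper, and the sample numbers are illustrative stand-ins, not kernel API.

```c
#include <stdio.h>

/* Illustrative stand-ins for the task_struct fields the patch reads;
 * in the kernel these are current->nr_dirtied and
 * current->nr_dirtied_pause. */
struct task_state {
	int nr_dirtied;       /* pages dirtied since the last throttle pause */
	int nr_dirtied_pause; /* next check point set by the throttling code */
};

static int min_int(int a, int b) { return a < b ? a : b; }
static int max_int(int a, int b) { return a > b ? a : b; }

/* Clamp the per-iteration page batch the way the patch does: never
 * write past the next throttling check point, but keep a floor of 8
 * pages so the per-batch setup cost stays amortized. */
static int clamp_batch(int nrptrs, const struct task_state *t)
{
	nrptrs = min_int(nrptrs, t->nr_dirtied_pause - t->nr_dirtied);
	return max_int(nrptrs, 8);
}

int main(void)
{
	struct task_state t = { .nr_dirtied = 100, .nr_dirtied_pause = 128 };

	/* A large requested batch is cut down to the 28 pages remaining
	 * before the throttle check point. */
	printf("%d\n", clamp_batch(1024, &t)); /* prints 28 */
	return 0;
}
```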
Diffstat (limited to 'fs/btrfs/file.c')
-rw-r--r-- | fs/btrfs/file.c | 2
1 file changed, 2 insertions, 0 deletions
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index dafdfa059bf6..52305a885c3f 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1167,6 +1167,8 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
 		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
 		     (sizeof(struct page *)));
+	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
+	nrptrs = max(nrptrs, 8);
 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
 	if (!pages)
 		return -ENOMEM;
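A note on the design, as read from the diff rather than stated in the commit message: the added min() makes the writer re-enter the dirty balancing path no later than its next throttling check point, instead of first dirtying a full buffer-sized batch, while the max(nrptrs, 8) floor preserves a small but useful batch even for a task already at or past its pause threshold.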