 fs/fs-writeback.c         | 23 ++++++++++-------------
 include/linux/writeback.h | 11 +++++++++++
 2 files changed, 21 insertions(+), 13 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 227ff12257f3..50445cf0b83a 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -30,15 +30,6 @@
 #include "internal.h"
 
-/*
- * The maximum number of pages to writeout in a single bdi flush/kupdate
- * operation. We do this so we don't hold I_SYNC against an inode for
- * enormous amounts of time, which would block a userspace task which has
- * been forced to throttle against that inode. Also, the code reevaluates
- * the dirty each time it has written this many pages.
- */
-#define MAX_WRITEBACK_PAGES	1024L
-
 /*
  * Passed into wb_writeback(), essentially a subset of writeback_control
  */
 struct wb_writeback_work {
@@ -515,7 +506,8 @@ static bool pin_sb_for_writeback(struct super_block *sb)
 	return false;
 }
 
-static long writeback_chunk_size(struct wb_writeback_work *work)
+static long writeback_chunk_size(struct backing_dev_info *bdi,
+				 struct wb_writeback_work *work)
 {
 	long pages;
 
@@ -534,8 +526,13 @@ static long writeback_chunk_size(struct wb_writeback_work *work)
 	 */
 	if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
 		pages = LONG_MAX;
-	else
-		pages = min(MAX_WRITEBACK_PAGES, work->nr_pages);
+	else {
+		pages = min(bdi->avg_write_bandwidth / 2,
+			    global_dirty_limit / DIRTY_SCOPE);
+		pages = min(pages, work->nr_pages);
+		pages = round_down(pages + MIN_WRITEBACK_PAGES,
+				   MIN_WRITEBACK_PAGES);
+	}
 
 	return pages;
 }
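
For intuition, here is a minimal userspace sketch of the new sizing logic. It assumes bdi->avg_write_bandwidth is in pages per second (as maintained by the bandwidth estimation code), 4KB pages, and models the kernel's power-of-two round_down(); the numbers are illustrative only.

	#include <stdio.h>

	/* models the kernel's round_down() for power-of-two alignment */
	#define round_down(x, y)	((x) & ~((y) - 1))

	#define DIRTY_SCOPE		8
	#define MIN_WRITEBACK_PAGES	1024UL	/* 4MB with 4KB pages */

	/* hypothetical stand-in for the non-integrity branch above; all
	 * inputs are in pages */
	static long chunk_size(long avg_write_bandwidth,
			       long global_dirty_limit, long nr_pages)
	{
		long pages = avg_write_bandwidth / 2;

		if (pages > global_dirty_limit / DIRTY_SCOPE)
			pages = global_dirty_limit / DIRTY_SCOPE;
		if (pages > nr_pages)
			pages = nr_pages;
		/*
		 * Adding MIN_WRITEBACK_PAGES before rounding down keeps the
		 * result a 4MB multiple and never below 4MB.
		 */
		return round_down(pages + MIN_WRITEBACK_PAGES,
				  MIN_WRITEBACK_PAGES);
	}

	int main(void)
	{
		/* e.g. a 100MB/s disk (25600 pages/s), 1GB dirty limit */
		printf("%ld\n", chunk_size(25600, 262144, 1L << 20));
		return 0;
	}

With these inputs the chunk comes out at 13312 pages (52MB), scaled to half the device bandwidth instead of the old fixed 1024-page cap.
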
@@ -600,7 +597,7 @@ static long writeback_sb_inodes(struct super_block *sb,
 			continue;
 		}
 		__iget(inode);
-		write_chunk = writeback_chunk_size(work);
+		write_chunk = writeback_chunk_size(wb->bdi, work);
 		wbc.nr_to_write = write_chunk;
 		wbc.pages_skipped = 0;
 
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index b625073b80c8..f1bfa12ea246 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -8,6 +8,10 @@
 #include <linux/fs.h>
 
 /*
+ * The 1/4 region under the global dirty thresh is for smooth dirty throttling:
+ *
+ *	(thresh - thresh/DIRTY_FULL_SCOPE, thresh)
+ *
  * The 1/16 region above the global dirty limit will be put to maximum pauses:
  *
  *	(limit, limit + limit/DIRTY_MAXPAUSE_AREA)
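
To make the band arithmetic concrete, a small sketch with hypothetical numbers (DIRTY_FULL_SCOPE is introduced by the next hunk; for simplicity this treats the dirty limit as equal to the threshold, while the kernel actually lets the limit trail the threshold):

	#include <stdio.h>

	#define DIRTY_SCOPE		8
	#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
	#define DIRTY_MAXPAUSE_AREA	16

	int main(void)
	{
		unsigned long thresh = 262144;	/* pages; 1GB, 4KB pages */

		/* the 1/4 region under thresh: smooth dirty throttling */
		printf("throttle band: (%lu, %lu)\n",
		       thresh - thresh / DIRTY_FULL_SCOPE, thresh);
		/* the 1/16 region above the limit: maximum pauses */
		printf("maxpause band: (%lu, %lu)\n",
		       thresh, thresh + thresh / DIRTY_MAXPAUSE_AREA);
		return 0;
	}

This prints (196608, 262144) for the throttle band and (262144, 278528) for the maxpause band.
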
@@ -25,9 +29,16 @@
  * knocks down the global dirty threshold quickly, in which case the global
  * dirty limit will follow down slowly to prevent livelocking all dirtier tasks.
  */
+#define DIRTY_SCOPE		8
+#define DIRTY_FULL_SCOPE	(DIRTY_SCOPE / 2)
 #define DIRTY_MAXPAUSE_AREA		16
 #define DIRTY_PASSGOOD_AREA		8
 
+/*
+ * 4MB minimal write chunk size
+ */
+#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))
+
 struct backing_dev_info;
 
 /*
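
As a sanity check on the MIN_WRITEBACK_PAGES expression: the 4096 is in KB, and shifting by (PAGE_CACHE_SHIFT - 10) converts KB to pages. A small sketch, assuming the common PAGE_CACHE_SHIFT of 12 (4KB pages):

	#include <assert.h>

	#define PAGE_CACHE_SHIFT	12	/* assumed: 4KB pages */
	#define MIN_WRITEBACK_PAGES	(4096UL >> (PAGE_CACHE_SHIFT - 10))

	int main(void)
	{
		assert(MIN_WRITEBACK_PAGES == 1024);	/* pages */
		/* 1024 pages of 4KB is exactly the intended 4MB */
		assert((MIN_WRITEBACK_PAGES << PAGE_CACHE_SHIFT) == (4UL << 20));
		return 0;
	}

With 64KB pages (PAGE_CACHE_SHIFT of 16) the same expression yields 64 pages, which is still 4MB.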