 fs/btrfs/extent-tree.c           |   3
 fs/buffer.c                      |   2
 fs/ext4/inode.c                  |   2
 fs/fs-writeback.c                |  84
 fs/quota/quota.c                 |   2
 fs/sync.c                        |   4
 fs/ubifs/budget.c                |   2
 include/linux/backing-dev.h      |  14
 include/linux/sched.h            |   7
 include/linux/writeback.h        |  33
 include/trace/events/writeback.h | 161
 kernel/fork.c                    |   3
 mm/backing-dev.c                 |   7
 mm/page-writeback.c              | 704
 mm/vmscan.c                      |   3
 15 files changed, 806 insertions(+), 225 deletions(-)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index f5be06a2462f..c9ee0e18bbdc 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3340,7 +3340,8 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans,
 	smp_mb();
 	nr_pages = min_t(unsigned long, nr_pages,
 		       root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
-	writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
+	writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages,
+				       WB_REASON_FS_FREE_SPACE);
 
 	spin_lock(&space_info->lock);
 	if (reserved > space_info->bytes_reserved)
diff --git a/fs/buffer.c b/fs/buffer.c
index 70a19745cb61..19d8eb7fdc81 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -288,7 +288,7 @@ static void free_more_memory(void)
 	struct zone *zone;
 	int nid;
 
-	wakeup_flusher_threads(1024);
+	wakeup_flusher_threads(1024, WB_REASON_FREE_MORE_MEM);
 	yield();
 
 	for_each_online_node(nid) {
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cc5a6da030a1..240f6e2dc7ee 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2372,7 +2372,7 @@ static int ext4_nonda_switch(struct super_block *sb)
 	 * start pushing delalloc when 1/2 of free blocks are dirty.
 	 */
 	if (free_blocks < 2 * dirty_blocks)
-		writeback_inodes_sb_if_idle(sb);
+		writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE);
 
 	return 0;
 }
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 04cf3b91e501..73c3992b2bb4 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -41,11 +41,23 @@ struct wb_writeback_work {
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
+	enum wb_reason reason;		/* why was writeback initiated? */
 
 	struct list_head list;		/* pending work list */
 	struct completion *done;	/* set if the caller waits */
 };
 
+const char *wb_reason_name[] = {
+	[WB_REASON_BACKGROUND]		= "background",
+	[WB_REASON_TRY_TO_FREE_PAGES]	= "try_to_free_pages",
+	[WB_REASON_SYNC]		= "sync",
+	[WB_REASON_PERIODIC]		= "periodic",
+	[WB_REASON_LAPTOP_TIMER]	= "laptop_timer",
+	[WB_REASON_FREE_MORE_MEM]	= "free_more_memory",
+	[WB_REASON_FS_FREE_SPACE]	= "fs_free_space",
+	[WB_REASON_FORKER_THREAD]	= "forker_thread"
+};
+
 /*
  * Include the creation of the trace points after defining the
  * wb_writeback_work structure so that the definition remains local to this
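[Note: wb_reason_name is an ordinary designated-initializer lookup table, indexed by the enum wb_reason values added in include/linux/writeback.h further down, so trace consumers can simply print wb_reason_name[reason]. A standalone sketch of the same pattern, compilable outside the kernel (the enum here is a stripped-down copy for illustration, not the kernel header):

#include <stdio.h>

/* stripped-down copy of enum wb_reason, for illustration only */
enum wb_reason {
	WB_REASON_BACKGROUND,
	WB_REASON_SYNC,
	WB_REASON_PERIODIC,
	WB_REASON_MAX,
};

static const char *wb_reason_name[] = {
	[WB_REASON_BACKGROUND]	= "background",
	[WB_REASON_SYNC]	= "sync",
	[WB_REASON_PERIODIC]	= "periodic",
};

int main(void)
{
	/* this is what the tracepoints do: wb_reason_name[__entry->reason] */
	printf("reason=%s\n", wb_reason_name[WB_REASON_PERIODIC]);
	return 0;
}
]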
@@ -115,7 +127,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 
 static void
 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-		      bool range_cyclic)
+		      bool range_cyclic, enum wb_reason reason)
 {
 	struct wb_writeback_work *work;
 
@@ -135,6 +147,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 	work->sync_mode	= WB_SYNC_NONE;
 	work->nr_pages	= nr_pages;
 	work->range_cyclic = range_cyclic;
+	work->reason	= reason;
 
 	bdi_queue_work(bdi, work);
 }
@@ -150,9 +163,10 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  * completion. Caller need not hold sb s_umount semaphore.
  *
  */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+			 enum wb_reason reason)
 {
-	__bdi_start_writeback(bdi, nr_pages, true);
+	__bdi_start_writeback(bdi, nr_pages, true, reason);
 }
 
 /**
@@ -251,7 +265,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
  */
 static int move_expired_inodes(struct list_head *delaying_queue,
 			       struct list_head *dispatch_queue,
-			       unsigned long *older_than_this)
+			       struct wb_writeback_work *work)
 {
 	LIST_HEAD(tmp);
 	struct list_head *pos, *node;
@@ -262,8 +276,8 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 
 	while (!list_empty(delaying_queue)) {
 		inode = wb_inode(delaying_queue->prev);
-		if (older_than_this &&
-		    inode_dirtied_after(inode, *older_than_this))
+		if (work->older_than_this &&
+		    inode_dirtied_after(inode, *work->older_than_this))
 			break;
 		if (sb && sb != inode->i_sb)
 			do_sb_sort = 1;
@@ -302,13 +316,13 @@ out:
  *                 |
  *                 +--> dequeue for IO
  */
-static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
+static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
 {
 	int moved;
 	assert_spin_locked(&wb->list_lock);
 	list_splice_init(&wb->b_more_io, &wb->b_io);
-	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
-	trace_writeback_queue_io(wb, older_than_this, moved);
+	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
+	trace_writeback_queue_io(wb, work, moved);
 }
 
 static int write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -641,31 +655,40 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
 	return wrote;
 }
 
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages)
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+			 enum wb_reason reason)
 {
 	struct wb_writeback_work work = {
 		.nr_pages	= nr_pages,
 		.sync_mode	= WB_SYNC_NONE,
 		.range_cyclic	= 1,
+		.reason		= reason,
 	};
 
 	spin_lock(&wb->list_lock);
 	if (list_empty(&wb->b_io))
-		queue_io(wb, NULL);
+		queue_io(wb, &work);
 	__writeback_inodes_wb(wb, &work);
 	spin_unlock(&wb->list_lock);
 
 	return nr_pages - work.nr_pages;
 }
 
-static inline bool over_bground_thresh(void)
+static bool over_bground_thresh(struct backing_dev_info *bdi)
 {
 	unsigned long background_thresh, dirty_thresh;
 
 	global_dirty_limits(&background_thresh, &dirty_thresh);
 
-	return (global_page_state(NR_FILE_DIRTY) +
-		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
+	if (global_page_state(NR_FILE_DIRTY) +
+	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
+		return true;
+
+	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
+	    bdi_dirty_limit(bdi, background_thresh))
+		return true;
+
+	return false;
 }
 
 /*
@@ -675,7 +698,7 @@ static inline bool over_bground_thresh(void)
 static void wb_update_bandwidth(struct bdi_writeback *wb,
 				unsigned long start_time)
 {
-	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, start_time);
+	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
 }
 
 /*
@@ -727,7 +750,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * For background writeout, stop when we are below the
 		 * background dirty threshold
 		 */
-		if (work->for_background && !over_bground_thresh())
+		if (work->for_background && !over_bground_thresh(wb->bdi))
 			break;
 
 		if (work->for_kupdate) {
@@ -738,7 +761,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 
 		trace_writeback_start(wb->bdi, work);
 		if (list_empty(&wb->b_io))
-			queue_io(wb, work->older_than_this);
+			queue_io(wb, work);
 		if (work->sb)
 			progress = writeback_sb_inodes(work->sb, wb, work);
 		else
@@ -811,13 +834,14 @@ static unsigned long get_nr_dirty_pages(void)
 
 static long wb_check_background_flush(struct bdi_writeback *wb)
 {
-	if (over_bground_thresh()) {
+	if (over_bground_thresh(wb->bdi)) {
 
 		struct wb_writeback_work work = {
 			.nr_pages	= LONG_MAX,
 			.sync_mode	= WB_SYNC_NONE,
 			.for_background	= 1,
 			.range_cyclic	= 1,
+			.reason		= WB_REASON_BACKGROUND,
 		};
 
 		return wb_writeback(wb, &work);
@@ -851,6 +875,7 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
 			.sync_mode	= WB_SYNC_NONE,
 			.for_kupdate	= 1,
 			.range_cyclic	= 1,
+			.reason		= WB_REASON_PERIODIC,
 		};
 
 		return wb_writeback(wb, &work);
@@ -969,7 +994,7 @@ int bdi_writeback_thread(void *data)
 * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
 * the whole world.
 */
-void wakeup_flusher_threads(long nr_pages)
+void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 {
 	struct backing_dev_info *bdi;
 
@@ -982,7 +1007,7 @@ void wakeup_flusher_threads(long nr_pages)
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
-		__bdi_start_writeback(bdi, nr_pages, false);
+		__bdi_start_writeback(bdi, nr_pages, false, reason);
 	}
 	rcu_read_unlock();
 }
@@ -1203,7 +1228,9 @@ static void wait_sb_inodes(struct super_block *sb)
  * on how many (if any) will be written, and this function does not wait
  * for IO completion of submitted IO.
  */
-void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
+void writeback_inodes_sb_nr(struct super_block *sb,
+			    unsigned long nr,
+			    enum wb_reason reason)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
@@ -1212,6 +1239,7 @@ void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
 		.tagged_writepages	= 1,
 		.done			= &done,
 		.nr_pages		= nr,
+		.reason			= reason,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
@@ -1228,9 +1256,9 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
  * on how many (if any) will be written, and this function does not wait
  * for IO completion of submitted IO.
  */
-void writeback_inodes_sb(struct super_block *sb)
+void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 {
-	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
+	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
@@ -1241,11 +1269,11 @@ EXPORT_SYMBOL(writeback_inodes_sb);
 * Invoke writeback_inodes_sb if no writeback is currently underway.
 * Returns 1 if writeback was started, 0 if not.
 */
-int writeback_inodes_sb_if_idle(struct super_block *sb)
+int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
 		down_read(&sb->s_umount);
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb(sb, reason);
 		up_read(&sb->s_umount);
 		return 1;
 	} else
@@ -1262,11 +1290,12 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
 * Returns 1 if writeback was started, 0 if not.
 */
 int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
-				   unsigned long nr)
+				   unsigned long nr,
+				   enum wb_reason reason)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
 		down_read(&sb->s_umount);
-		writeback_inodes_sb_nr(sb, nr);
+		writeback_inodes_sb_nr(sb, nr, reason);
 		up_read(&sb->s_umount);
 		return 1;
 	} else
@@ -1290,6 +1319,7 @@ void sync_inodes_sb(struct super_block *sb)
 		.nr_pages	= LONG_MAX,
 		.range_cyclic	= 0,
 		.done		= &done,
+		.reason		= WB_REASON_SYNC,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
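[Note: the common thread in all of the fs-writeback.c changes is that the reason is stored in the work item when writeback is initiated, then travels untouched to the flusher thread where the tracepoints report it. A toy out-of-kernel sketch of that plumbing (the types and function here are stand-ins, not the kernel's):

#include <stdio.h>

enum wb_reason { WB_REASON_BACKGROUND, WB_REASON_SYNC, WB_REASON_FS_FREE_SPACE };

struct wb_writeback_work {
	long nr_pages;
	enum wb_reason reason;	/* why was writeback initiated? */
};

/* roughly what bdi_queue_work() + wb_writeback() preserve for tracing */
static void execute(const struct wb_writeback_work *w)
{
	printf("exec: nr_pages=%ld reason=%d\n", w->nr_pages, w->reason);
}

/* a writeback_inodes_sb_nr()-style initiator: reason is a new parameter */
static void start_writeback(long nr, enum wb_reason reason)
{
	struct wb_writeback_work w = { .nr_pages = nr, .reason = reason };
	execute(&w);
}

int main(void)
{
	start_writeback(1024, WB_REASON_FS_FREE_SPACE);
	return 0;
}
]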
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index aae0edb95c6c..35f4b0ecdeb3 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -286,7 +286,7 @@ static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
 		/* caller already holds s_umount */
 		if (sb->s_flags & MS_RDONLY)
 			return -EROFS;
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb(sb, WB_REASON_SYNC);
 		return 0;
 	default:
 		return -EINVAL;
diff --git a/fs/sync.c b/fs/sync.c
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -43,7 +43,7 @@ static int __sync_filesystem(struct super_block *sb, int wait)
 	if (wait)
 		sync_inodes_sb(sb);
 	else
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb(sb, WB_REASON_SYNC);
 
 	if (sb->s_op->sync_fs)
 		sb->s_op->sync_fs(sb, wait);
@@ -98,7 +98,7 @@ static void sync_filesystems(int wait)
  */
 SYSCALL_DEFINE0(sync)
 {
-	wakeup_flusher_threads(0);
+	wakeup_flusher_threads(0, WB_REASON_SYNC);
 	sync_filesystems(0);
 	sync_filesystems(1);
 	if (unlikely(laptop_mode))
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 315de66e52b2..bc4f94b28706 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -63,7 +63,7 @@
 static void shrink_liability(struct ubifs_info *c, int nr_to_write)
 {
 	down_read(&c->vfs_sb->s_umount);
-	writeback_inodes_sb(c->vfs_sb);
+	writeback_inodes_sb(c->vfs_sb, WB_REASON_FS_FREE_SPACE);
 	up_read(&c->vfs_sb->s_umount);
 }
 
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 3b2f9cb82986..b1038bd686ac 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -40,6 +40,7 @@ typedef int (congested_fn)(void *, int);
 enum bdi_stat_item {
 	BDI_RECLAIMABLE,
 	BDI_WRITEBACK,
+	BDI_DIRTIED,
 	BDI_WRITTEN,
 	NR_BDI_STAT_ITEMS
 };
@@ -74,10 +75,20 @@ struct backing_dev_info {
 	struct percpu_counter bdi_stat[NR_BDI_STAT_ITEMS];
 
 	unsigned long bw_time_stamp;	/* last time write bw is updated */
+	unsigned long dirtied_stamp;
 	unsigned long written_stamp;	/* pages written at bw_time_stamp */
 	unsigned long write_bandwidth;	/* the estimated write bandwidth */
 	unsigned long avg_write_bandwidth; /* further smoothed write bw */
 
+	/*
+	 * The base dirty throttle rate, re-calculated on every 200ms.
+	 * All the bdi tasks' dirty rate will be curbed under it.
+	 * @dirty_ratelimit tracks the estimated @balanced_dirty_ratelimit
+	 * in small steps and is much more smooth/stable than the latter.
+	 */
+	unsigned long dirty_ratelimit;
+	unsigned long balanced_dirty_ratelimit;
+
 	struct prop_local_percpu completions;
 	int dirty_exceeded;
 
@@ -107,7 +118,8 @@ int bdi_register(struct backing_dev_info *bdi, struct device *parent,
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
 void bdi_unregister(struct backing_dev_info *bdi);
 int bdi_setup_and_register(struct backing_dev_info *, char *, unsigned int);
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages);
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+			 enum wb_reason reason);
 void bdi_start_background_writeback(struct backing_dev_info *bdi);
 int bdi_writeback_thread(void *data);
 int bdi_has_dirty_io(struct backing_dev_info *bdi);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index e8acce717d2a..68daf4f27e2c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1522,6 +1522,13 @@ struct task_struct {
 	int make_it_fail;
 #endif
 	struct prop_local_single dirties;
+	/*
+	 * when (nr_dirtied >= nr_dirtied_pause), it's time to call
+	 * balance_dirty_pages() for some dirty throttling pause
+	 */
+	int nr_dirtied;
+	int nr_dirtied_pause;
+
 #ifdef CONFIG_LATENCYTOP
 	int latency_record_count;
 	struct latency_record latency_record[LT_SAVECOUNT];
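[Note: the two new task_struct fields implement a cheap per-task trigger — a task dirties pages at full speed until nr_dirtied reaches nr_dirtied_pause, at which point balance_dirty_pages() is entered for a throttling pause. A standalone sketch of that counting pattern, with made-up values (this is a stand-in for the real balance_dirty_pages_ratelimited() check, not a copy of it):

#include <stdbool.h>
#include <stdio.h>

struct task {
	int nr_dirtied;
	int nr_dirtied_pause;
};

static bool account_dirtied_page(struct task *t)
{
	if (++t->nr_dirtied < t->nr_dirtied_pause)
		return false;		/* keep dirtying at full speed */
	t->nr_dirtied = 0;
	return true;			/* time to throttle/pause this task */
}

int main(void)
{
	/* fork() seeds the threshold at 128KB worth of pages:
	   128 >> (PAGE_SHIFT - 10) = 32 pages with 4KB pages */
	struct task t = { .nr_dirtied = 0, .nr_dirtied_pause = 32 };
	int pauses = 0;

	for (int page = 0; page < 100; page++)
		pauses += account_dirtied_page(&t);
	printf("pauses=%d\n", pauses);	/* 3: after the 32nd, 64th, 96th page */
	return 0;
}
]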
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 2b8963ff0f35..a378c295851f 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -39,6 +39,23 @@ enum writeback_sync_modes {
 };
 
 /*
+ * why some writeback work was initiated
+ */
+enum wb_reason {
+	WB_REASON_BACKGROUND,
+	WB_REASON_TRY_TO_FREE_PAGES,
+	WB_REASON_SYNC,
+	WB_REASON_PERIODIC,
+	WB_REASON_LAPTOP_TIMER,
+	WB_REASON_FREE_MORE_MEM,
+	WB_REASON_FS_FREE_SPACE,
+	WB_REASON_FORKER_THREAD,
+
+	WB_REASON_MAX,
+};
+extern const char *wb_reason_name[];
+
+/*
  * A control structure which tells the writeback code what to do.  These are
  * always on the stack, and hence need no locking.  They are always initialised
  * in a manner such that unspecified fields are set to zero.
@@ -69,14 +86,17 @@ struct writeback_control {
  */
 struct bdi_writeback;
 int inode_wait(void *);
-void writeback_inodes_sb(struct super_block *);
-void writeback_inodes_sb_nr(struct super_block *, unsigned long nr);
-int writeback_inodes_sb_if_idle(struct super_block *);
-int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr);
+void writeback_inodes_sb(struct super_block *, enum wb_reason reason);
+void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
+			    enum wb_reason reason);
+int writeback_inodes_sb_if_idle(struct super_block *, enum wb_reason reason);
+int writeback_inodes_sb_nr_if_idle(struct super_block *, unsigned long nr,
+				   enum wb_reason reason);
 void sync_inodes_sb(struct super_block *);
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages);
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+			 enum wb_reason reason);
 long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
-void wakeup_flusher_threads(long nr_pages);
+void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
 
 /* writeback.h requires fs.h; it, too, is not included from here. */
 static inline void wait_on_inode(struct inode *inode)
@@ -143,6 +163,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi,
 
 void __bdi_update_bandwidth(struct backing_dev_info *bdi,
 			    unsigned long thresh,
+			    unsigned long bg_thresh,
 			    unsigned long dirty,
 			    unsigned long bdi_thresh,
 			    unsigned long bdi_dirty,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 5f172703eb4f..b99caa8b780c 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -34,6 +34,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
 		__field(int, for_kupdate)
 		__field(int, range_cyclic)
 		__field(int, for_background)
+		__field(int, reason)
 	),
 	TP_fast_assign(
 		strncpy(__entry->name, dev_name(bdi->dev), 32);
@@ -43,16 +44,18 @@ DECLARE_EVENT_CLASS(writeback_work_class,
 		__entry->for_kupdate = work->for_kupdate;
 		__entry->range_cyclic = work->range_cyclic;
 		__entry->for_background	= work->for_background;
+		__entry->reason = work->reason;
 	),
 	TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
-		  "kupdate=%d range_cyclic=%d background=%d",
+		  "kupdate=%d range_cyclic=%d background=%d reason=%s",
 		  __entry->name,
 		  MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
 		  __entry->nr_pages,
 		  __entry->sync_mode,
 		  __entry->for_kupdate,
 		  __entry->range_cyclic,
-		  __entry->for_background
+		  __entry->for_background,
+		  wb_reason_name[__entry->reason]
 	)
 );
 #define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -104,30 +107,6 @@ DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
 DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
 DEFINE_WRITEBACK_EVENT(writeback_thread_start);
 DEFINE_WRITEBACK_EVENT(writeback_thread_stop);
-DEFINE_WRITEBACK_EVENT(balance_dirty_start);
-DEFINE_WRITEBACK_EVENT(balance_dirty_wait);
-
-TRACE_EVENT(balance_dirty_written,
-
-	TP_PROTO(struct backing_dev_info *bdi, int written),
-
-	TP_ARGS(bdi, written),
-
-	TP_STRUCT__entry(
-		__array(char,	name, 32)
-		__field(int,	written)
-	),
-
-	TP_fast_assign(
-		strncpy(__entry->name, dev_name(bdi->dev), 32);
-		__entry->written = written;
-	),
-
-	TP_printk("bdi %s written %d",
-		  __entry->name,
-		  __entry->written
-	)
-);
 
 DECLARE_EVENT_CLASS(wbc_class,
 	TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
@@ -181,27 +160,31 @@ DEFINE_WBC_EVENT(wbc_writepage);
 
 TRACE_EVENT(writeback_queue_io,
 	TP_PROTO(struct bdi_writeback *wb,
-		 unsigned long *older_than_this,
+		 struct wb_writeback_work *work,
 		 int moved),
-	TP_ARGS(wb, older_than_this, moved),
+	TP_ARGS(wb, work, moved),
 	TP_STRUCT__entry(
 		__array(char,		name, 32)
 		__field(unsigned long,	older)
 		__field(long,		age)
 		__field(int,		moved)
+		__field(int,		reason)
 	),
 	TP_fast_assign(
+		unsigned long *older_than_this = work->older_than_this;
 		strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
 		__entry->older	= older_than_this ?  *older_than_this : 0;
 		__entry->age	= older_than_this ?
 				  (jiffies - *older_than_this) * 1000 / HZ : -1;
 		__entry->moved	= moved;
+		__entry->reason	= work->reason;
 	),
-	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d",
+	TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s",
 		  __entry->name,
 		  __entry->older,	/* older_than_this in jiffies */
 		  __entry->age,		/* older_than_this in relative milliseconds */
-		  __entry->moved)
+		  __entry->moved,
+		  wb_reason_name[__entry->reason])
 );
 
 TRACE_EVENT(global_dirty_state,
@@ -250,6 +233,124 @@ TRACE_EVENT(global_dirty_state,
 	)
 );
 
+#define KBps(x)			((x) << (PAGE_SHIFT - 10))
+
+TRACE_EVENT(bdi_dirty_ratelimit,
+
+	TP_PROTO(struct backing_dev_info *bdi,
+		 unsigned long dirty_rate,
+		 unsigned long task_ratelimit),
+
+	TP_ARGS(bdi, dirty_rate, task_ratelimit),
+
+	TP_STRUCT__entry(
+		__array(char,		bdi, 32)
+		__field(unsigned long,	write_bw)
+		__field(unsigned long,	avg_write_bw)
+		__field(unsigned long,	dirty_rate)
+		__field(unsigned long,	dirty_ratelimit)
+		__field(unsigned long,	task_ratelimit)
+		__field(unsigned long,	balanced_dirty_ratelimit)
+	),
+
+	TP_fast_assign(
+		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+		__entry->write_bw	= KBps(bdi->write_bandwidth);
+		__entry->avg_write_bw	= KBps(bdi->avg_write_bandwidth);
+		__entry->dirty_rate	= KBps(dirty_rate);
+		__entry->dirty_ratelimit = KBps(bdi->dirty_ratelimit);
+		__entry->task_ratelimit	= KBps(task_ratelimit);
+		__entry->balanced_dirty_ratelimit =
+					  KBps(bdi->balanced_dirty_ratelimit);
+	),
+
+	TP_printk("bdi %s: "
+		  "write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
+		  "dirty_ratelimit=%lu task_ratelimit=%lu "
+		  "balanced_dirty_ratelimit=%lu",
+		  __entry->bdi,
+		  __entry->write_bw,		/* write bandwidth */
+		  __entry->avg_write_bw,	/* avg write bandwidth */
+		  __entry->dirty_rate,		/* bdi dirty rate */
+		  __entry->dirty_ratelimit,	/* base ratelimit */
+		  __entry->task_ratelimit, /* ratelimit with position control */
+		  __entry->balanced_dirty_ratelimit /* the balanced ratelimit */
+	)
+);
+
+TRACE_EVENT(balance_dirty_pages,
+
+	TP_PROTO(struct backing_dev_info *bdi,
+		 unsigned long thresh,
+		 unsigned long bg_thresh,
+		 unsigned long dirty,
+		 unsigned long bdi_thresh,
+		 unsigned long bdi_dirty,
+		 unsigned long dirty_ratelimit,
+		 unsigned long task_ratelimit,
+		 unsigned long dirtied,
+		 long pause,
+		 unsigned long start_time),
+
+	TP_ARGS(bdi, thresh, bg_thresh, dirty, bdi_thresh, bdi_dirty,
+		dirty_ratelimit, task_ratelimit,
+		dirtied, pause, start_time),
+
+	TP_STRUCT__entry(
+		__array(	 char,	bdi, 32)
+		__field(unsigned long,	limit)
+		__field(unsigned long,	setpoint)
+		__field(unsigned long,	dirty)
+		__field(unsigned long,	bdi_setpoint)
+		__field(unsigned long,	bdi_dirty)
+		__field(unsigned long,	dirty_ratelimit)
+		__field(unsigned long,	task_ratelimit)
+		__field(unsigned int,	dirtied)
+		__field(unsigned int,	dirtied_pause)
+		__field(unsigned long,	paused)
+		__field(	 long,	pause)
+	),
+
+	TP_fast_assign(
+		unsigned long freerun = (thresh + bg_thresh) / 2;
+		strlcpy(__entry->bdi, dev_name(bdi->dev), 32);
+
+		__entry->limit		= global_dirty_limit;
+		__entry->setpoint	= (global_dirty_limit + freerun) / 2;
+		__entry->dirty		= dirty;
+		__entry->bdi_setpoint	= __entry->setpoint *
+						bdi_thresh / (thresh + 1);
+		__entry->bdi_dirty	= bdi_dirty;
+		__entry->dirty_ratelimit = KBps(dirty_ratelimit);
+		__entry->task_ratelimit	= KBps(task_ratelimit);
+		__entry->dirtied	= dirtied;
+		__entry->dirtied_pause	= current->nr_dirtied_pause;
+		__entry->pause		= pause * 1000 / HZ;
+		__entry->paused		= (jiffies - start_time) * 1000 / HZ;
+	),
+
+
+	TP_printk("bdi %s: "
+		  "limit=%lu setpoint=%lu dirty=%lu "
+		  "bdi_setpoint=%lu bdi_dirty=%lu "
+		  "dirty_ratelimit=%lu task_ratelimit=%lu "
+		  "dirtied=%u dirtied_pause=%u "
+		  "paused=%lu pause=%ld",
+		  __entry->bdi,
+		  __entry->limit,
+		  __entry->setpoint,
+		  __entry->dirty,
+		  __entry->bdi_setpoint,
+		  __entry->bdi_dirty,
+		  __entry->dirty_ratelimit,
+		  __entry->task_ratelimit,
+		  __entry->dirtied,
+		  __entry->dirtied_pause,
+		  __entry->paused,	/* ms */
+		  __entry->pause	/* ms */
+	)
+);
+
 DECLARE_EVENT_CLASS(writeback_congest_waited_template,
 
 	TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
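[Note: the KBps() macro used by both new tracepoints converts page counts (or pages-per-second rates) to KB, so trace output is in KB/s regardless of page size. A quick standalone check, assuming 4KB pages:

#include <stdio.h>

#define PAGE_SHIFT 12				/* assume 4KB pages */
#define KBps(x) ((x) << (PAGE_SHIFT - 10))	/* pages/s -> KB/s */

int main(void)
{
	/* 25600 pages/s * 4KB/page = 102400 KB/s = 100 MB/s */
	printf("%lu KB/s\n", (unsigned long)KBps(25600UL));
	return 0;
}
]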
diff --git a/kernel/fork.c b/kernel/fork.c
index 70d76191afb9..ba0d17261329 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1299,6 +1299,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->pdeath_signal = 0;
 	p->exit_state = 0;
 
+	p->nr_dirtied = 0;
+	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
+
 	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
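[Note: the initializer 128 >> (PAGE_SHIFT - 10) expresses "128KB worth of pages", so the initial pause threshold is the same amount of data on any page size. A quick standalone check of the arithmetic:

#include <stdio.h>

int main(void)
{
	/* "128KB worth of pages", independent of page size */
	for (int page_shift = 12; page_shift <= 16; page_shift += 4) {
		int pages = 128 >> (page_shift - 10);
		printf("PAGE_SHIFT=%d -> %d pages (%d KB)\n",
		       page_shift, pages, pages << (page_shift - 10));
	}
	/* 4KB pages: 128 >> 2 = 32 pages; 64KB pages: 128 >> 6 = 2 pages */
	return 0;
}
]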
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 7520ef0bfd47..a0860640378d 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -97,6 +97,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   "BdiDirtyThresh:     %10lu kB\n"
 		   "DirtyThresh:        %10lu kB\n"
 		   "BackgroundThresh:   %10lu kB\n"
+		   "BdiDirtied:         %10lu kB\n"
 		   "BdiWritten:         %10lu kB\n"
 		   "BdiWriteBandwidth:  %10lu kBps\n"
 		   "b_dirty:            %10lu\n"
@@ -109,6 +110,7 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 		   K(bdi_thresh),
 		   K(dirty_thresh),
 		   K(background_thresh),
+		   (unsigned long) K(bdi_stat(bdi, BDI_DIRTIED)),
 		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
 		   (unsigned long) K(bdi->write_bandwidth),
 		   nr_dirty,
@@ -473,7 +475,8 @@ static int bdi_forker_thread(void *ptr)
 			 * the bdi from the thread. Hopefully 1024 is
 			 * large enough for efficient IO.
			 */
-			writeback_inodes_wb(&bdi->wb, 1024);
+			writeback_inodes_wb(&bdi->wb, 1024,
+					    WB_REASON_FORKER_THREAD);
 		} else {
 			/*
 			 * The spinlock makes sure we do not lose
@@ -683,6 +686,8 @@ int bdi_init(struct backing_dev_info *bdi)
 	bdi->bw_time_stamp = jiffies;
 	bdi->written_stamp = 0;
 
+	bdi->balanced_dirty_ratelimit = INIT_BW;
+	bdi->dirty_ratelimit = INIT_BW;
 	bdi->write_bandwidth = INIT_BW;
 	bdi->avg_write_bandwidth = INIT_BW;
 
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 793e9874de51..9c31199dae9a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -46,26 +46,14 @@
  */
 #define BANDWIDTH_INTERVAL	max(HZ/5, 1)
 
+#define RATELIMIT_CALC_SHIFT	10
+
 /*
  * After a CPU has dirtied this many pages, balance_dirty_pages_ratelimited
  * will look to see if it needs to force writeback or throttling.
  */
 static long ratelimit_pages = 32;
 
-/*
- * When balance_dirty_pages decides that the caller needs to perform some
- * non-background writeback, this is how many pages it will attempt to write.
- * It should be somewhat larger than dirtied pages to ensure that reasonably
- * large amounts of I/O are submitted.
- */
-static inline long sync_writeback_pages(unsigned long dirtied)
-{
-	if (dirtied < ratelimit_pages)
-		dirtied = ratelimit_pages;
-
-	return dirtied + dirtied / 2;
-}
-
 /* The following parameters are exported via /proc/sys/vm */
 
 /*
@@ -167,6 +155,8 @@ static void update_completion_period(void)
 	int shift = calc_period_shift();
 	prop_change_shift(&vm_completions, shift);
 	prop_change_shift(&vm_dirties, shift);
+
+	writeback_set_ratelimit();
 }
 
 int dirty_background_ratio_handler(struct ctl_table *table, int write,
@@ -260,50 +250,6 @@ static void bdi_writeout_fraction(struct backing_dev_info *bdi,
 			numerator, denominator);
 }
 
-static inline void task_dirties_fraction(struct task_struct *tsk,
-		long *numerator, long *denominator)
-{
-	prop_fraction_single(&vm_dirties, &tsk->dirties,
-			numerator, denominator);
-}
-
-/*
- * task_dirty_limit - scale down dirty throttling threshold for one task
- *
- * task specific dirty limit:
- *
- *   dirty -= (dirty/8) * p_{t}
- *
- * To protect light/slow dirtying tasks from heavier/fast ones, we start
- * throttling individual tasks before reaching the bdi dirty limit.
- * Relatively low thresholds will be allocated to heavy dirtiers. So when
- * dirty pages grow large, heavy dirtiers will be throttled first, which will
- * effectively curb the growth of dirty pages. Light dirtiers with high enough
- * dirty threshold may never get throttled.
- */
-#define TASK_LIMIT_FRACTION 8
-static unsigned long task_dirty_limit(struct task_struct *tsk,
-				       unsigned long bdi_dirty)
-{
-	long numerator, denominator;
-	unsigned long dirty = bdi_dirty;
-	u64 inv = dirty / TASK_LIMIT_FRACTION;
-
-	task_dirties_fraction(tsk, &numerator, &denominator);
-	inv *= numerator;
-	do_div(inv, denominator);
-
-	dirty -= inv;
-
-	return max(dirty, bdi_dirty/2);
-}
-
-/* Minimum limit for any task */
-static unsigned long task_min_dirty_limit(unsigned long bdi_dirty)
-{
-	return bdi_dirty - bdi_dirty / TASK_LIMIT_FRACTION;
-}
-
 /*
  * bdi_min_ratio keeps the sum of the minimum dirty shares of all
  * registered backing devices, which, for obvious reasons, can not
@@ -413,6 +359,12 @@ unsigned long determine_dirtyable_memory(void)
 	return x + 1;	/* Ensure that we never return 0 */
 }
 
+static unsigned long dirty_freerun_ceiling(unsigned long thresh,
+					   unsigned long bg_thresh)
+{
+	return (thresh + bg_thresh) / 2;
+}
+
 static unsigned long hard_dirty_limit(unsigned long thresh)
 {
 	return max(thresh, global_dirty_limit);
@@ -497,6 +449,198 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
 	return bdi_dirty;
 }
 
+/*
+ * Dirty position control.
+ *
+ * (o) global/bdi setpoints
+ *
+ * We want the dirty pages be balanced around the global/bdi setpoints.
+ * When the number of dirty pages is higher/lower than the setpoint, the
+ * dirty position control ratio (and hence task dirty ratelimit) will be
+ * decreased/increased to bring the dirty pages back to the setpoint.
+ *
+ *     pos_ratio = 1 << RATELIMIT_CALC_SHIFT
+ *
+ *     if (dirty < setpoint) scale up   pos_ratio
+ *     if (dirty > setpoint) scale down pos_ratio
+ *
+ *     if (bdi_dirty < bdi_setpoint) scale up   pos_ratio
+ *     if (bdi_dirty > bdi_setpoint) scale down pos_ratio
+ *
+ *     task_ratelimit = dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT
+ *
+ * (o) global control line
+ *
+ *     ^ pos_ratio
+ *     |
+ *     |            |<===== global dirty control scope ======>|
+ * 2.0 .............*
+ *     |            .*
+ *     |            . *
+ *     |            .   *
+ *     |            .     *
+ *     |            .        *
+ *     |            .            *
+ * 1.0 ................................*
+ *     |            .                  .     *
+ *     |            .                  .          *
+ *     |            .                  .              *
+ *     |            .                  .                 *
+ *     |            .                  .                    *
+ *   0 +------------.------------------.----------------------*------------->
+ *           freerun^          setpoint^                 limit^   dirty pages
+ *
+ * (o) bdi control line
+ *
+ *     ^ pos_ratio
+ *     |
+ *     |            *
+ *     |              *
+ *     |                *
+ *     |                  *
+ *     |                    * |<=========== span ============>|
+ * 1.0 .......................*
+ *     |                      . *
+ *     |                      .   *
+ *     |                      .     *
+ *     |                      .       *
+ *     |                      .         *
+ *     |                      .           *
+ *     |                      .             *
+ *     |                      .               *
+ *     |                      .                 *
+ *     |                      .                   *
+ *     |                      .                     *
+ * 1/4 ...............................................* * * * * * * * * * * *
+ *     |                      .                         .
+ *     |                      .                           .
+ *     |                      .                             .
+ *   0 +----------------------.-------------------------------.------------->
+ *                bdi_setpoint^                    x_intercept^
+ *
+ * The bdi control line won't drop below pos_ratio=1/4, so that bdi_dirty can
+ * be smoothly throttled down to normal if it starts high in situations like
+ * - start writing to a slow SD card and a fast disk at the same time. The SD
+ *   card's bdi_dirty may rush to many times higher than bdi_setpoint.
+ * - the bdi dirty thresh drops quickly due to change of JBOD workload
+ */
+static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
+					unsigned long thresh,
+					unsigned long bg_thresh,
+					unsigned long dirty,
+					unsigned long bdi_thresh,
+					unsigned long bdi_dirty)
+{
+	unsigned long write_bw = bdi->avg_write_bandwidth;
+	unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh);
+	unsigned long limit = hard_dirty_limit(thresh);
+	unsigned long x_intercept;
+	unsigned long setpoint;		/* dirty pages' target balance point */
+	unsigned long bdi_setpoint;
+	unsigned long span;
+	long long pos_ratio;		/* for scaling up/down the rate limit */
+	long x;
+
+	if (unlikely(dirty >= limit))
+		return 0;
+
+	/*
+	 * global setpoint
+	 *
+	 *                           setpoint - dirty 3
+	 *        f(dirty) := 1.0 + (----------------)
+	 *                           limit - setpoint
+	 *
+	 * it's a 3rd order polynomial that subjects to
+	 *
+	 * (1) f(freerun)  = 2.0 => rampup dirty_ratelimit reasonably fast
+	 * (2) f(setpoint) = 1.0 => the balance point
+	 * (3) f(limit)    = 0   => the hard limit
+	 * (4) df/dx      <= 0	 => negative feedback control
+	 * (5) the closer to setpoint, the smaller |df/dx| (and the reverse)
+	 *     => fast response on large errors; small oscillation near setpoint
+	 */
+	setpoint = (freerun + limit) / 2;
+	x = div_s64((setpoint - dirty) << RATELIMIT_CALC_SHIFT,
+		    limit - setpoint + 1);
+	pos_ratio = x;
+	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
+	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;
+
+	/*
+	 * We have computed basic pos_ratio above based on global situation. If
+	 * the bdi is over/under its share of dirty pages, we want to scale
+	 * pos_ratio further down/up. That is done by the following mechanism.
+	 */
+
+	/*
+	 * bdi setpoint
+	 *
+	 *        f(bdi_dirty) := 1.0 + k * (bdi_dirty - bdi_setpoint)
+	 *
+	 *                        x_intercept - bdi_dirty
+	 *                     := --------------------------
+	 *                        x_intercept - bdi_setpoint
+	 *
+	 * The main bdi control line is a linear function that subjects to
+	 *
+	 * (1) f(bdi_setpoint) = 1.0
+	 * (2) k = - 1 / (8 * write_bw)  (in single bdi case)
+	 *     or equally: x_intercept = bdi_setpoint + 8 * write_bw
+	 *
+	 * For single bdi case, the dirty pages are observed to fluctuate
+	 * regularly within range
+	 *        [bdi_setpoint - write_bw/2, bdi_setpoint + write_bw/2]
+	 * for various filesystems, where (2) can yield in a reasonable 12.5%
+	 * fluctuation range for pos_ratio.
+	 *
+	 * For JBOD case, bdi_thresh (not bdi_dirty!) could fluctuate up to its
+	 * own size, so move the slope over accordingly and choose a slope that
+	 * yields 100% pos_ratio fluctuation on suddenly doubled bdi_thresh.
+	 */
+	if (unlikely(bdi_thresh > thresh))
+		bdi_thresh = thresh;
+	bdi_thresh = max(bdi_thresh, (limit - dirty) / 8);
+	/*
+	 * scale global setpoint to bdi's:
+	 *	bdi_setpoint = setpoint * bdi_thresh / thresh
+	 */
+	x = div_u64((u64)bdi_thresh << 16, thresh + 1);
+	bdi_setpoint = setpoint * (u64)x >> 16;
+	/*
+	 * Use span=(8*write_bw) in single bdi case as indicated by
+	 * (thresh - bdi_thresh ~= 0) and transit to bdi_thresh in JBOD case.
+	 *
+	 *        bdi_thresh                    thresh - bdi_thresh
+	 * span = ---------- * (8 * write_bw) + ------------------- * bdi_thresh
+	 *          thresh                            thresh
+	 */
+	span = (thresh - bdi_thresh + 8 * write_bw) * (u64)x >> 16;
+	x_intercept = bdi_setpoint + span;
+
+	if (bdi_dirty < x_intercept - span / 4) {
+		pos_ratio = div_u64(pos_ratio * (x_intercept - bdi_dirty),
+				    x_intercept - bdi_setpoint + 1);
+	} else
+		pos_ratio /= 4;
+
+	/*
+	 * bdi reserve area, safeguard against dirty pool underrun and disk idle
+	 * It may push the desired control point of global dirty pages higher
+	 * than setpoint.
+	 */
+	x_intercept = bdi_thresh / 2;
+	if (bdi_dirty < x_intercept) {
+		if (bdi_dirty > x_intercept / 8)
+			pos_ratio = div_u64(pos_ratio * x_intercept, bdi_dirty);
+		else
+			pos_ratio *= 8;
+	}
+
+	return pos_ratio;
+}
+
 static void bdi_update_write_bandwidth(struct backing_dev_info *bdi,
 					unsigned long elapsed,
 					unsigned long written)
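[Note: the global control line is easy to sanity-check outside the kernel. A standalone sketch of just the 3rd-order polynomial part, in the same fixed-point arithmetic as the patch, with made-up page counts (div_s64() replaced by a plain 64-bit division):

#include <stdio.h>

#define RATELIMIT_CALC_SHIFT	10

/* global control line only: f(freerun)=2.0, f(setpoint)=1.0, f(limit)=0 */
static long long global_pos_ratio(unsigned long freerun,
				  unsigned long limit,
				  unsigned long dirty)
{
	unsigned long setpoint = (freerun + limit) / 2;
	long long pos_ratio;
	long long x;

	if (dirty >= limit)
		return 0;

	x = (((long long)setpoint - (long long)dirty) << RATELIMIT_CALC_SHIFT) /
	    (long long)(limit - setpoint + 1);
	pos_ratio = x;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
	pos_ratio += 1 << RATELIMIT_CALC_SHIFT;

	return pos_ratio;	/* 1024 == pos_ratio of 1.0 */
}

int main(void)
{
	unsigned long freerun = 100000, limit = 200000;

	/* at freerun: ~2.0; at the setpoint: exactly 1.0; near limit: ~0 */
	printf("%lld %lld %lld\n",
	       global_pos_ratio(freerun, limit, freerun),	/* ~2045 */
	       global_pos_ratio(freerun, limit, 150000),	/* 1024 */
	       global_pos_ratio(freerun, limit, 199999));	/* ~3 */
	return 0;
}
]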
@@ -593,8 +737,153 @@ static void global_update_bandwidth(unsigned long thresh, | |||
593 | spin_unlock(&dirty_lock); | 737 | spin_unlock(&dirty_lock); |
594 | } | 738 | } |
595 | 739 | ||
740 | /* | ||
741 | * Maintain bdi->dirty_ratelimit, the base dirty throttle rate. | ||
742 | * | ||
743 | * Normal bdi tasks will be curbed at or below it in long term. | ||
744 | * Obviously it should be around (write_bw / N) when there are N dd tasks. | ||
745 | */ | ||
746 | static void bdi_update_dirty_ratelimit(struct backing_dev_info *bdi, | ||
747 | unsigned long thresh, | ||
748 | unsigned long bg_thresh, | ||
749 | unsigned long dirty, | ||
750 | unsigned long bdi_thresh, | ||
751 | unsigned long bdi_dirty, | ||
752 | unsigned long dirtied, | ||
753 | unsigned long elapsed) | ||
754 | { | ||
755 | unsigned long freerun = dirty_freerun_ceiling(thresh, bg_thresh); | ||
756 | unsigned long limit = hard_dirty_limit(thresh); | ||
757 | unsigned long setpoint = (freerun + limit) / 2; | ||
758 | unsigned long write_bw = bdi->avg_write_bandwidth; | ||
759 | unsigned long dirty_ratelimit = bdi->dirty_ratelimit; | ||
760 | unsigned long dirty_rate; | ||
761 | unsigned long task_ratelimit; | ||
762 | unsigned long balanced_dirty_ratelimit; | ||
763 | unsigned long pos_ratio; | ||
764 | unsigned long step; | ||
765 | unsigned long x; | ||
766 | |||
767 | /* | ||
768 | * The dirty rate will match the writeout rate in long term, except | ||
769 | * when dirty pages are truncated by userspace or re-dirtied by FS. | ||
770 | */ | ||
771 | dirty_rate = (dirtied - bdi->dirtied_stamp) * HZ / elapsed; | ||
772 | |||
773 | pos_ratio = bdi_position_ratio(bdi, thresh, bg_thresh, dirty, | ||
774 | bdi_thresh, bdi_dirty); | ||
775 | /* | ||
776 | * task_ratelimit reflects each dd's dirty rate for the past 200ms. | ||
777 | */ | ||
778 | task_ratelimit = (u64)dirty_ratelimit * | ||
779 | pos_ratio >> RATELIMIT_CALC_SHIFT; | ||
780 | task_ratelimit++; /* it helps rampup dirty_ratelimit from tiny values */ | ||
781 | |||
782 | /* | ||
783 | * A linear estimation of the "balanced" throttle rate. The theory is, | ||
784 | * if there are N dd tasks, each throttled at task_ratelimit, the bdi's | ||
785 | * dirty_rate will be measured to be (N * task_ratelimit). So the below | ||
786 | * formula will yield the balanced rate limit (write_bw / N). | ||
787 | * | ||
788 | * Note that the expanded form is not a pure rate feedback: | ||
789 | * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) (1) | ||
790 | * but also takes pos_ratio into account: | ||
791 | * rate_(i+1) = rate_(i) * (write_bw / dirty_rate) * pos_ratio (2) | ||
792 | * | ||
793 | * (1) is not realistic because pos_ratio also takes part in balancing | ||
794 | * the dirty rate. Consider the state | ||
795 | * pos_ratio = 0.5 (3) | ||
796 | * rate = 2 * (write_bw / N) (4) | ||
797 | * If (1) is used, it will stuck in that state! Because each dd will | ||
798 | * be throttled at | ||
799 | * task_ratelimit = pos_ratio * rate = (write_bw / N) (5) | ||
800 | * yielding | ||
801 | * dirty_rate = N * task_ratelimit = write_bw (6) | ||
802 | * Putting (6) into (1), we get | ||
803 | * rate_(i+1) = rate_(i) (7) | ||
804 | * | ||
805 | * So we end up using (2) to always keep | ||
806 | * rate_(i+1) ~= (write_bw / N) (8) | ||
807 | * regardless of the value of pos_ratio. As long as (8) is satisfied, | ||
808 | * pos_ratio is able to drive itself to 1.0, which is not only where | ||
809 | * the dirty count meets the setpoint, but also where the slope of | ||
810 | * pos_ratio is flattest and hence task_ratelimit fluctuates the least. | ||
811 | */ | ||
812 | balanced_dirty_ratelimit = div_u64((u64)task_ratelimit * write_bw, | ||
813 | dirty_rate | 1); | ||
814 | |||
815 | /* | ||
816 | * We could safely do this and return immediately: | ||
817 | * | ||
818 | * bdi->dirty_ratelimit = balanced_dirty_ratelimit; | ||
819 | * | ||
820 | * However, to get a more stable dirty_ratelimit, the below elaborated | ||
821 | * code makes use of task_ratelimit to filter out singular points and | ||
822 | * limit the step size. | ||
823 | * | ||
824 | * The below code essentially only uses the relative value of | ||
825 | * | ||
826 | * task_ratelimit - dirty_ratelimit | ||
827 | * = (pos_ratio - 1) * dirty_ratelimit | ||
828 | * | ||
829 | * which reflects the direction and size of dirty position error. | ||
830 | */ | ||
831 | |||
832 | /* | ||
833 | * dirty_ratelimit will follow balanced_dirty_ratelimit iff | ||
834 | * task_ratelimit is also on the same side of dirty_ratelimit. | ||
835 | * For example, when | ||
836 | * - dirty_ratelimit > balanced_dirty_ratelimit | ||
837 | * - dirty_ratelimit > task_ratelimit (dirty pages are above setpoint) | ||
838 | * lowering dirty_ratelimit will help meet both the position and rate | ||
839 | * control targets. Otherwise, don't update dirty_ratelimit if it will | ||
840 | * only help meet the rate target. After all, what users ultimately | ||
841 | * feel and care about are a stable dirty rate and small position error. | ||
842 | * | ||
843 | * |task_ratelimit - dirty_ratelimit| is used to limit the step size | ||
844 | * and filter out the singular points of balanced_dirty_ratelimit, which | ||
845 | * keeps jumping around randomly and can even leap far away at times | ||
846 | * due to the small 200ms estimation period of dirty_rate (we want to | ||
847 | * keep that period small to reduce time lags). | ||
848 | */ | ||
849 | step = 0; | ||
850 | if (dirty < setpoint) { | ||
851 | x = min(bdi->balanced_dirty_ratelimit, | ||
852 | min(balanced_dirty_ratelimit, task_ratelimit)); | ||
853 | if (dirty_ratelimit < x) | ||
854 | step = x - dirty_ratelimit; | ||
855 | } else { | ||
856 | x = max(bdi->balanced_dirty_ratelimit, | ||
857 | max(balanced_dirty_ratelimit, task_ratelimit)); | ||
858 | if (dirty_ratelimit > x) | ||
859 | step = dirty_ratelimit - x; | ||
860 | } | ||
861 | |||
862 | /* | ||
863 | * Don't pursue 100% rate matching. It's impossible since the balanced | ||
864 | * rate itself is constantly fluctuating. So decrease the tracking speed | ||
865 | * when it gets close to the target. This helps eliminate pointless tremors. | ||
866 | */ | ||
867 | step >>= dirty_ratelimit / (2 * step + 1); | ||
868 | /* | ||
869 | * Limit the tracking speed to avoid overshooting. | ||
870 | */ | ||
871 | step = (step + 7) / 8; | ||
872 | |||
873 | if (dirty_ratelimit < balanced_dirty_ratelimit) | ||
874 | dirty_ratelimit += step; | ||
875 | else | ||
876 | dirty_ratelimit -= step; | ||
877 | |||
878 | bdi->dirty_ratelimit = max(dirty_ratelimit, 1UL); | ||
879 | bdi->balanced_dirty_ratelimit = balanced_dirty_ratelimit; | ||
880 | |||
881 | trace_bdi_dirty_ratelimit(bdi, dirty_rate, task_ratelimit); | ||
882 | } | ||
883 | |||
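To make equations (1)-(8) concrete, here is a toy userspace model of formula (2). With N identical dirtiers the measured dirty_rate is N * task_ratelimit, so a single iteration of (2) lands on write_bw / N regardless of pos_ratio, while pure rate feedback (1) would freeze at whatever rate it started from, exactly as argued above. Every figure below is invented, and the kernel additionally smooths the step as in bdi_update_dirty_ratelimit().

#include <stdio.h>

int main(void)
{
	double write_bw = 100.0;        /* hypothetical device, MB/s */
	double pos_ratio = 0.5;         /* state (3) */
	int    N = 4;                   /* concurrent dd tasks */
	double rate = 2 * write_bw / N; /* state (4): start 2x too high */

	for (int i = 0; i < 3; i++) {
		double task_ratelimit = rate * pos_ratio;          /* (5) */
		double dirty_rate = N * task_ratelimit;            /* (6) */
		rate = rate * (write_bw / dirty_rate) * pos_ratio; /* (2) */
		printf("iter %d: rate = %.2f (target %.2f)\n",
		       i, rate, write_bw / N);
	}
	return 0;
}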
596 | void __bdi_update_bandwidth(struct backing_dev_info *bdi, | 884 | void __bdi_update_bandwidth(struct backing_dev_info *bdi, |
597 | unsigned long thresh, | 885 | unsigned long thresh, |
886 | unsigned long bg_thresh, | ||
598 | unsigned long dirty, | 887 | unsigned long dirty, |
599 | unsigned long bdi_thresh, | 888 | unsigned long bdi_thresh, |
600 | unsigned long bdi_dirty, | 889 | unsigned long bdi_dirty, |
@@ -602,6 +891,7 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi, | |||
602 | { | 891 | { |
603 | unsigned long now = jiffies; | 892 | unsigned long now = jiffies; |
604 | unsigned long elapsed = now - bdi->bw_time_stamp; | 893 | unsigned long elapsed = now - bdi->bw_time_stamp; |
894 | unsigned long dirtied; | ||
605 | unsigned long written; | 895 | unsigned long written; |
606 | 896 | ||
607 | /* | 897 | /* |
@@ -610,6 +900,7 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi, | |||
610 | if (elapsed < BANDWIDTH_INTERVAL) | 900 | if (elapsed < BANDWIDTH_INTERVAL) |
611 | return; | 901 | return; |
612 | 902 | ||
903 | dirtied = percpu_counter_read(&bdi->bdi_stat[BDI_DIRTIED]); | ||
613 | written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]); | 904 | written = percpu_counter_read(&bdi->bdi_stat[BDI_WRITTEN]); |
614 | 905 | ||
615 | /* | 906 | /* |
@@ -619,18 +910,23 @@ void __bdi_update_bandwidth(struct backing_dev_info *bdi, | |||
619 | if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time)) | 910 | if (elapsed > HZ && time_before(bdi->bw_time_stamp, start_time)) |
620 | goto snapshot; | 911 | goto snapshot; |
621 | 912 | ||
622 | if (thresh) | 913 | if (thresh) { |
623 | global_update_bandwidth(thresh, dirty, now); | 914 | global_update_bandwidth(thresh, dirty, now); |
624 | 915 | bdi_update_dirty_ratelimit(bdi, thresh, bg_thresh, dirty, | |
916 | bdi_thresh, bdi_dirty, | ||
917 | dirtied, elapsed); | ||
918 | } | ||
625 | bdi_update_write_bandwidth(bdi, elapsed, written); | 919 | bdi_update_write_bandwidth(bdi, elapsed, written); |
626 | 920 | ||
627 | snapshot: | 921 | snapshot: |
922 | bdi->dirtied_stamp = dirtied; | ||
628 | bdi->written_stamp = written; | 923 | bdi->written_stamp = written; |
629 | bdi->bw_time_stamp = now; | 924 | bdi->bw_time_stamp = now; |
630 | } | 925 | } |
631 | 926 | ||
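The dirtied snapshot added above mirrors the existing written one: both counters are sampled at most once per BANDWIDTH_INTERVAL, and the deltas divided by elapsed jiffies become the pages-per-second rates fed to bdi_update_dirty_ratelimit() and bdi_update_write_bandwidth(). A self-contained sketch of that snapshot arithmetic follows; the struct is a stand-in rather than the kernel's backing_dev_info, and HZ=1000 with a 200ms interval are assumptions matching the comments in this patch.

#include <stdio.h>

#define HZ 1000
#define BANDWIDTH_INTERVAL (HZ / 5)     /* assumed 200ms */

struct bdi_model {
	unsigned long dirtied_stamp;
	unsigned long written_stamp;
	unsigned long bw_time_stamp;
};

static void update(struct bdi_model *bdi, unsigned long now,
		   unsigned long dirtied, unsigned long written)
{
	unsigned long elapsed = now - bdi->bw_time_stamp;

	if (elapsed < BANDWIDTH_INTERVAL)
		return;                 /* rate-limit the estimation */

	printf("dirty_rate=%lu pages/s, write_bw=%lu pages/s\n",
	       (dirtied - bdi->dirtied_stamp) * HZ / elapsed,
	       (written - bdi->written_stamp) * HZ / elapsed);

	bdi->dirtied_stamp = dirtied;   /* snapshot for the next round */
	bdi->written_stamp = written;
	bdi->bw_time_stamp = now;
}

int main(void)
{
	struct bdi_model bdi = { 0, 0, 0 };

	update(&bdi, 200, 6000, 5000);          /* 30000 / 25000 pages/s */
	update(&bdi, 400, 11000, 10500);        /* 25000 / 27500 pages/s */
	return 0;
}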
632 | static void bdi_update_bandwidth(struct backing_dev_info *bdi, | 927 | static void bdi_update_bandwidth(struct backing_dev_info *bdi, |
633 | unsigned long thresh, | 928 | unsigned long thresh, |
929 | unsigned long bg_thresh, | ||
634 | unsigned long dirty, | 930 | unsigned long dirty, |
635 | unsigned long bdi_thresh, | 931 | unsigned long bdi_thresh, |
636 | unsigned long bdi_dirty, | 932 | unsigned long bdi_dirty, |
@@ -639,37 +935,99 @@ static void bdi_update_bandwidth(struct backing_dev_info *bdi, | |||
639 | if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL)) | 935 | if (time_is_after_eq_jiffies(bdi->bw_time_stamp + BANDWIDTH_INTERVAL)) |
640 | return; | 936 | return; |
641 | spin_lock(&bdi->wb.list_lock); | 937 | spin_lock(&bdi->wb.list_lock); |
642 | __bdi_update_bandwidth(bdi, thresh, dirty, bdi_thresh, bdi_dirty, | 938 | __bdi_update_bandwidth(bdi, thresh, bg_thresh, dirty, |
643 | start_time); | 939 | bdi_thresh, bdi_dirty, start_time); |
644 | spin_unlock(&bdi->wb.list_lock); | 940 | spin_unlock(&bdi->wb.list_lock); |
645 | } | 941 | } |
646 | 942 | ||
647 | /* | 943 | /* |
944 | * After a task has dirtied this many pages, balance_dirty_pages_ratelimited_nr() | ||
945 | * will look to see if it needs to start dirty throttling. | ||
946 | * | ||
947 | * If dirty_poll_interval is too low, big NUMA machines will call the expensive | ||
948 | * global_page_state() too often. So scale it near-sqrt to the safety margin | ||
949 | * (the number of pages we may dirty without exceeding the dirty limits). | ||
950 | */ | ||
951 | static unsigned long dirty_poll_interval(unsigned long dirty, | ||
952 | unsigned long thresh) | ||
953 | { | ||
954 | if (thresh > dirty) | ||
955 | return 1UL << (ilog2(thresh - dirty) >> 1); | ||
956 | |||
957 | return 1; | ||
958 | } | ||
959 | |||
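dirty_poll_interval() is small enough to model directly: the interval is roughly sqrt(thresh - dirty), so a task 16384 pages under the limit re-checks after 128 dirtied pages while one 64 pages away re-checks after 8. A userspace sketch, with ilog2() open-coded since the kernel helper is not available here:

#include <stdio.h>

static unsigned long ilog2(unsigned long v)
{
	unsigned long r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned long dirty_poll_interval(unsigned long dirty,
					 unsigned long thresh)
{
	if (thresh > dirty)
		return 1UL << (ilog2(thresh - dirty) >> 1);
	return 1;
}

int main(void)
{
	printf("%lu\n", dirty_poll_interval(0, 16384));         /* 128 */
	printf("%lu\n", dirty_poll_interval(16320, 16384));     /* 8 */
	printf("%lu\n", dirty_poll_interval(20000, 16384));     /* 1 */
	return 0;
}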
960 | static unsigned long bdi_max_pause(struct backing_dev_info *bdi, | ||
961 | unsigned long bdi_dirty) | ||
962 | { | ||
963 | unsigned long bw = bdi->avg_write_bandwidth; | ||
964 | unsigned long hi = ilog2(bw); | ||
965 | unsigned long lo = ilog2(bdi->dirty_ratelimit); | ||
966 | unsigned long t; | ||
967 | |||
968 | /* target a 20ms max pause in the 1-dd case */ | ||
969 | t = HZ / 50; | ||
970 | |||
971 | /* | ||
972 | * Scale up pause time for concurrent dirtiers in order to reduce CPU | ||
973 | * overheads. | ||
974 | * | ||
975 | * (N * 20ms) on 2^N concurrent tasks. | ||
976 | */ | ||
977 | if (hi > lo) | ||
978 | t += (hi - lo) * (20 * HZ) / 1024; | ||
979 | |||
980 | /* | ||
981 | * Limit pause time for small memory systems. If we sleep for too long, | ||
982 | * a small pool of dirty/writeback pages may go empty and the disk may | ||
983 | * go idle. | ||
984 | * | ||
985 | * 8 serves as the safety ratio. | ||
986 | */ | ||
987 | if (bdi_dirty) | ||
988 | t = min(t, bdi_dirty * HZ / (8 * bw + 1)); | ||
989 | |||
990 | /* | ||
991 | * The pause time will settle within the range (max_pause/4, max_pause). | ||
992 | * Apply a minimum value of 4 to get a non-zero max_pause/4. | ||
993 | */ | ||
994 | return clamp_val(t, 4, MAX_PAUSE); | ||
995 | } | ||
996 | |||
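Some worked numbers for bdi_max_pause(): hi - lo approximates log2 of the number of dirtiers (since dirty_ratelimit settles near write_bw / N), so each doubling of tasks adds roughly 20ms of allowed pause, and the bdi_dirty term keeps a small dirty pool from draining during the sleep. The sketch below assumes HZ=1000 and a MAX_PAUSE of HZ/5 (200ms, consistent with the 200ms period cited in the comments); bandwidth figures are invented.

#include <stdio.h>

#define HZ        1000
#define MAX_PAUSE (HZ / 5)              /* assumed 200ms cap */

static unsigned long ilog2(unsigned long v)
{
	unsigned long r = 0;

	while (v >>= 1)
		r++;
	return r;
}

static unsigned long max_pause(unsigned long write_bw,      /* pages/s */
			       unsigned long dirty_ratelimit,
			       unsigned long bdi_dirty)
{
	unsigned long hi = ilog2(write_bw);
	unsigned long lo = ilog2(dirty_ratelimit);
	unsigned long t = HZ / 50;              /* 20ms base */
	unsigned long cap;

	if (hi > lo)                            /* ~20ms per doubling */
		t += (hi - lo) * (20 * HZ) / 1024;

	if (bdi_dirty) {                        /* don't drain the pool */
		cap = bdi_dirty * HZ / (8 * write_bw + 1);
		if (t > cap)
			t = cap;
	}

	if (t < 4)
		t = 4;
	if (t > MAX_PAUSE)
		t = MAX_PAUSE;
	return t;
}

int main(void)
{
	/* 25600 pages/s ~ 100MB/s with 4KB pages */
	printf("1 dd: %lu jiffies\n", max_pause(25600, 25600, 65536));
	printf("8 dd: %lu jiffies\n", max_pause(25600, 3200, 65536));
	return 0;
}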
997 | /* | ||
648 | * balance_dirty_pages() must be called by processes which are generating dirty | 998 | * balance_dirty_pages() must be called by processes which are generating dirty |
649 | * data. It looks at the number of dirty pages in the machine and will force | 999 | * data. It looks at the number of dirty pages in the machine and will force |
650 | * the caller to perform writeback if the system is over `vm_dirty_ratio'. | 1000 | * the caller to wait once it crosses (background_thresh + dirty_thresh) / 2. |
651 | * If we're over `background_thresh' then the writeback threads are woken to | 1001 | * If we're over `background_thresh' then the writeback threads are woken to |
652 | * perform some writeout. | 1002 | * perform some writeout. |
653 | */ | 1003 | */ |
654 | static void balance_dirty_pages(struct address_space *mapping, | 1004 | static void balance_dirty_pages(struct address_space *mapping, |
655 | unsigned long write_chunk) | 1005 | unsigned long pages_dirtied) |
656 | { | 1006 | { |
657 | unsigned long nr_reclaimable, bdi_nr_reclaimable; | 1007 | unsigned long nr_reclaimable; /* = file_dirty + unstable_nfs */ |
1008 | unsigned long bdi_reclaimable; | ||
658 | unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */ | 1009 | unsigned long nr_dirty; /* = file_dirty + writeback + unstable_nfs */ |
659 | unsigned long bdi_dirty; | 1010 | unsigned long bdi_dirty; |
1011 | unsigned long freerun; | ||
660 | unsigned long background_thresh; | 1012 | unsigned long background_thresh; |
661 | unsigned long dirty_thresh; | 1013 | unsigned long dirty_thresh; |
662 | unsigned long bdi_thresh; | 1014 | unsigned long bdi_thresh; |
663 | unsigned long task_bdi_thresh; | 1015 | long pause = 0; |
664 | unsigned long min_task_bdi_thresh; | 1016 | long uninitialized_var(max_pause); |
665 | unsigned long pages_written = 0; | ||
666 | unsigned long pause = 1; | ||
667 | bool dirty_exceeded = false; | 1017 | bool dirty_exceeded = false; |
668 | bool clear_dirty_exceeded = true; | 1018 | unsigned long task_ratelimit; |
1019 | unsigned long uninitialized_var(dirty_ratelimit); | ||
1020 | unsigned long pos_ratio; | ||
669 | struct backing_dev_info *bdi = mapping->backing_dev_info; | 1021 | struct backing_dev_info *bdi = mapping->backing_dev_info; |
670 | unsigned long start_time = jiffies; | 1022 | unsigned long start_time = jiffies; |
671 | 1023 | ||
672 | for (;;) { | 1024 | for (;;) { |
1025 | /* | ||
1026 | * Unstable writes are a feature of certain networked | ||
1027 | * filesystems (i.e. NFS) in which data may have been | ||
1028 | * written to the server's write cache, but has not yet | ||
1029 | * been flushed to permanent storage. | ||
1030 | */ | ||
673 | nr_reclaimable = global_page_state(NR_FILE_DIRTY) + | 1031 | nr_reclaimable = global_page_state(NR_FILE_DIRTY) + |
674 | global_page_state(NR_UNSTABLE_NFS); | 1032 | global_page_state(NR_UNSTABLE_NFS); |
675 | nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK); | 1033 | nr_dirty = nr_reclaimable + global_page_state(NR_WRITEBACK); |
@@ -681,12 +1039,28 @@ static void balance_dirty_pages(struct address_space *mapping, | |||
681 | * catch-up. This avoids (excessively) small writeouts | 1039 | * catch-up. This avoids (excessively) small writeouts |
682 | * when the bdi limits are ramping up. | 1040 | * when the bdi limits are ramping up. |
683 | */ | 1041 | */ |
684 | if (nr_dirty <= (background_thresh + dirty_thresh) / 2) | 1042 | freerun = dirty_freerun_ceiling(dirty_thresh, |
1043 | background_thresh); | ||
1044 | if (nr_dirty <= freerun) | ||
685 | break; | 1045 | break; |
686 | 1046 | ||
1047 | if (unlikely(!writeback_in_progress(bdi))) | ||
1048 | bdi_start_background_writeback(bdi); | ||
1049 | |||
1050 | /* | ||
1051 | * bdi_thresh is not treated as a hard limit the way | ||
1052 | * dirty_thresh is, for two reasons: | ||
1053 | * - in JBOD setup, bdi_thresh can fluctuate a lot | ||
1054 | * - in a system with HDD and USB key, the USB key may somehow | ||
1055 | * go into state (bdi_dirty >> bdi_thresh) either because | ||
1056 | * bdi_dirty starts high, or because bdi_thresh drops low. | ||
1057 | * In this case we don't want to hard throttle the USB key | ||
1058 | * dirtiers for 100 seconds until bdi_dirty drops under | ||
1059 | * bdi_thresh. Instead the auxiliary bdi control line in | ||
1060 | * bdi_position_ratio() will let the dirtier task progress | ||
1061 | * at some rate <= (write_bw / 2) for bringing down bdi_dirty. | ||
1062 | */ | ||
687 | bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); | 1063 | bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh); |
688 | min_task_bdi_thresh = task_min_dirty_limit(bdi_thresh); | ||
689 | task_bdi_thresh = task_dirty_limit(current, bdi_thresh); | ||
690 | 1064 | ||
691 | /* | 1065 | /* |
692 | * In order to avoid the stacked BDI deadlock we need | 1066 | * In order to avoid the stacked BDI deadlock we need |
@@ -698,56 +1072,69 @@ static void balance_dirty_pages(struct address_space *mapping, | |||
698 | * actually dirty; with m+n sitting in the percpu | 1072 | * actually dirty; with m+n sitting in the percpu |
699 | * deltas. | 1073 | * deltas. |
700 | */ | 1074 | */ |
701 | if (task_bdi_thresh < 2 * bdi_stat_error(bdi)) { | 1075 | if (bdi_thresh < 2 * bdi_stat_error(bdi)) { |
702 | bdi_nr_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE); | 1076 | bdi_reclaimable = bdi_stat_sum(bdi, BDI_RECLAIMABLE); |
703 | bdi_dirty = bdi_nr_reclaimable + | 1077 | bdi_dirty = bdi_reclaimable + |
704 | bdi_stat_sum(bdi, BDI_WRITEBACK); | 1078 | bdi_stat_sum(bdi, BDI_WRITEBACK); |
705 | } else { | 1079 | } else { |
706 | bdi_nr_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE); | 1080 | bdi_reclaimable = bdi_stat(bdi, BDI_RECLAIMABLE); |
707 | bdi_dirty = bdi_nr_reclaimable + | 1081 | bdi_dirty = bdi_reclaimable + |
708 | bdi_stat(bdi, BDI_WRITEBACK); | 1082 | bdi_stat(bdi, BDI_WRITEBACK); |
709 | } | 1083 | } |
710 | 1084 | ||
711 | /* | 1085 | dirty_exceeded = (bdi_dirty > bdi_thresh) || |
712 | * The bdi thresh is somehow "soft" limit derived from the | ||
713 | * global "hard" limit. The former helps to prevent heavy IO | ||
714 | * bdi or process from holding back light ones; The latter is | ||
715 | * the last resort safeguard. | ||
716 | */ | ||
717 | dirty_exceeded = (bdi_dirty > task_bdi_thresh) || | ||
718 | (nr_dirty > dirty_thresh); | 1086 | (nr_dirty > dirty_thresh); |
719 | clear_dirty_exceeded = (bdi_dirty <= min_task_bdi_thresh) && | 1087 | if (dirty_exceeded && !bdi->dirty_exceeded) |
720 | (nr_dirty <= dirty_thresh); | ||
721 | |||
722 | if (!dirty_exceeded) | ||
723 | break; | ||
724 | |||
725 | if (!bdi->dirty_exceeded) | ||
726 | bdi->dirty_exceeded = 1; | 1088 | bdi->dirty_exceeded = 1; |
727 | 1089 | ||
728 | bdi_update_bandwidth(bdi, dirty_thresh, nr_dirty, | 1090 | bdi_update_bandwidth(bdi, dirty_thresh, background_thresh, |
729 | bdi_thresh, bdi_dirty, start_time); | 1091 | nr_dirty, bdi_thresh, bdi_dirty, |
1092 | start_time); | ||
730 | 1093 | ||
731 | /* Note: nr_reclaimable denotes nr_dirty + nr_unstable. | 1094 | max_pause = bdi_max_pause(bdi, bdi_dirty); |
732 | * Unstable writes are a feature of certain networked | 1095 | |
733 | * filesystems (i.e. NFS) in which data may have been | 1096 | dirty_ratelimit = bdi->dirty_ratelimit; |
734 | * written to the server's write cache, but has not yet | 1097 | pos_ratio = bdi_position_ratio(bdi, dirty_thresh, |
735 | * been flushed to permanent storage. | 1098 | background_thresh, nr_dirty, |
736 | * Only move pages to writeback if this bdi is over its | 1099 | bdi_thresh, bdi_dirty); |
737 | * threshold otherwise wait until the disk writes catch | 1100 | if (unlikely(pos_ratio == 0)) { |
738 | * up. | 1101 | pause = max_pause; |
739 | */ | 1102 | goto pause; |
740 | trace_balance_dirty_start(bdi); | 1103 | } |
741 | if (bdi_nr_reclaimable > task_bdi_thresh) { | 1104 | task_ratelimit = (u64)dirty_ratelimit * |
742 | pages_written += writeback_inodes_wb(&bdi->wb, | 1105 | pos_ratio >> RATELIMIT_CALC_SHIFT; |
743 | write_chunk); | 1106 | pause = (HZ * pages_dirtied) / (task_ratelimit | 1); |
744 | trace_balance_dirty_written(bdi, pages_written); | 1107 | if (unlikely(pause <= 0)) { |
745 | if (pages_written >= write_chunk) | 1108 | trace_balance_dirty_pages(bdi, |
746 | break; /* We've done our duty */ | 1109 | dirty_thresh, |
1110 | background_thresh, | ||
1111 | nr_dirty, | ||
1112 | bdi_thresh, | ||
1113 | bdi_dirty, | ||
1114 | dirty_ratelimit, | ||
1115 | task_ratelimit, | ||
1116 | pages_dirtied, | ||
1117 | pause, | ||
1118 | start_time); | ||
1119 | pause = 1; /* avoid resetting nr_dirtied_pause below */ | ||
1120 | break; | ||
747 | } | 1121 | } |
1122 | pause = min(pause, max_pause); | ||
1123 | |||
1124 | pause: | ||
1125 | trace_balance_dirty_pages(bdi, | ||
1126 | dirty_thresh, | ||
1127 | background_thresh, | ||
1128 | nr_dirty, | ||
1129 | bdi_thresh, | ||
1130 | bdi_dirty, | ||
1131 | dirty_ratelimit, | ||
1132 | task_ratelimit, | ||
1133 | pages_dirtied, | ||
1134 | pause, | ||
1135 | start_time); | ||
748 | __set_current_state(TASK_UNINTERRUPTIBLE); | 1136 | __set_current_state(TASK_UNINTERRUPTIBLE); |
749 | io_schedule_timeout(pause); | 1137 | io_schedule_timeout(pause); |
750 | trace_balance_dirty_wait(bdi); | ||
751 | 1138 | ||
752 | dirty_thresh = hard_dirty_limit(dirty_thresh); | 1139 | dirty_thresh = hard_dirty_limit(dirty_thresh); |
753 | /* | 1140 | /* |
@@ -756,24 +1143,30 @@ static void balance_dirty_pages(struct address_space *mapping, | |||
756 | * 200ms is typically more than enough to curb heavy dirtiers; | 1143 | * 200ms is typically more than enough to curb heavy dirtiers; |
757 | * (b) the pause time limit makes the dirtiers more responsive. | 1144 | * (b) the pause time limit makes the dirtiers more responsive. |
758 | */ | 1145 | */ |
759 | if (nr_dirty < dirty_thresh && | 1146 | if (nr_dirty < dirty_thresh) |
760 | bdi_dirty < (task_bdi_thresh + bdi_thresh) / 2 && | ||
761 | time_after(jiffies, start_time + MAX_PAUSE)) | ||
762 | break; | 1147 | break; |
763 | |||
764 | /* | ||
765 | * Increase the delay for each loop, up to our previous | ||
766 | * default of taking a 100ms nap. | ||
767 | */ | ||
768 | pause <<= 1; | ||
769 | if (pause > HZ / 10) | ||
770 | pause = HZ / 10; | ||
771 | } | 1148 | } |
772 | 1149 | ||
773 | /* Clear dirty_exceeded flag only when no task can exceed the limit */ | 1150 | if (!dirty_exceeded && bdi->dirty_exceeded) |
774 | if (clear_dirty_exceeded && bdi->dirty_exceeded) | ||
775 | bdi->dirty_exceeded = 0; | 1151 | bdi->dirty_exceeded = 0; |
776 | 1152 | ||
1153 | current->nr_dirtied = 0; | ||
1154 | if (pause == 0) { /* in freerun area */ | ||
1155 | current->nr_dirtied_pause = | ||
1156 | dirty_poll_interval(nr_dirty, dirty_thresh); | ||
1157 | } else if (pause <= max_pause / 4 && | ||
1158 | pages_dirtied >= current->nr_dirtied_pause) { | ||
1159 | current->nr_dirtied_pause = clamp_val( | ||
1160 | dirty_ratelimit * (max_pause / 2) / HZ, | ||
1161 | pages_dirtied + pages_dirtied / 8, | ||
1162 | pages_dirtied * 4); | ||
1163 | } else if (pause >= max_pause) { | ||
1164 | current->nr_dirtied_pause = 1 | clamp_val( | ||
1165 | dirty_ratelimit * (max_pause / 2) / HZ, | ||
1166 | pages_dirtied / 4, | ||
1167 | pages_dirtied - pages_dirtied / 8); | ||
1168 | } | ||
1169 | |||
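The three-way retuning just above aims the next sleep at about max_pause/2: in the freerun area the quota simply tracks dirty_poll_interval(), a too-short pause grows the quota, and a maxed-out pause shrinks it, with the clamp bounds keeping each adjustment within roughly a factor of four either way. A hedged userspace rendering of the two adaptive branches, with all inputs invented:

#include <stdio.h>

#define HZ 1000

static long clampl(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

int main(void)
{
	long dirty_ratelimit = 2560;    /* pages/s */
	long max_pause = 200;           /* jiffies */
	long pages_dirtied = 64;
	long pause = 30;                /* the sleep we just took */
	long nr_dirtied_pause = 64;     /* current per-task quota */

	/* target quota: what we may dirty in max_pause/2 */
	long target = dirty_ratelimit * (max_pause / 2) / HZ;

	if (pause <= max_pause / 4 && pages_dirtied >= nr_dirtied_pause)
		nr_dirtied_pause = clampl(target,
					  pages_dirtied + pages_dirtied / 8,
					  pages_dirtied * 4);
	else if (pause >= max_pause)
		nr_dirtied_pause = 1 | clampl(target,
					      pages_dirtied / 4,
					      pages_dirtied - pages_dirtied / 8);

	printf("next quota: %ld pages\n", nr_dirtied_pause);  /* 256 */
	return 0;
}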
777 | if (writeback_in_progress(bdi)) | 1170 | if (writeback_in_progress(bdi)) |
778 | return; | 1171 | return; |
779 | 1172 | ||
@@ -785,8 +1178,10 @@ static void balance_dirty_pages(struct address_space *mapping, | |||
785 | * In normal mode, we start background writeout at the lower | 1178 | * In normal mode, we start background writeout at the lower |
786 | * background_thresh, to keep the amount of dirty memory low. | 1179 | * background_thresh, to keep the amount of dirty memory low. |
787 | */ | 1180 | */ |
788 | if ((laptop_mode && pages_written) || | 1181 | if (laptop_mode) |
789 | (!laptop_mode && (nr_reclaimable > background_thresh))) | 1182 | return; |
1183 | |||
1184 | if (nr_reclaimable > background_thresh) | ||
790 | bdi_start_background_writeback(bdi); | 1185 | bdi_start_background_writeback(bdi); |
791 | } | 1186 | } |
792 | 1187 | ||
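The heart of the IO-less scheme is the sleep computation inside the loop above: a task that has dirtied pages_dirtied pages while its allowed rate is task_ratelimit pages per second owes HZ * pages_dirtied / task_ratelimit jiffies of sleep, with no writeback submission involved. A standalone rendering with invented figures; HZ=1000 and a RATELIMIT_CALC_SHIFT of 10 are assumptions:

#include <stdio.h>
#include <stdint.h>

#define HZ                   1000
#define RATELIMIT_CALC_SHIFT 10         /* assumed */

int main(void)
{
	unsigned long dirty_ratelimit = 2560;   /* per-bdi base, pages/s */
	unsigned long pos_ratio = 512;          /* 0.5 in fixed point */
	unsigned long pages_dirtied = 32;

	unsigned long task_ratelimit =
		(uint64_t)dirty_ratelimit * pos_ratio >> RATELIMIT_CALC_SHIFT;
	/* the "| 1" below mirrors the patch's guard against division by 0 */
	long pause = HZ * pages_dirtied / (task_ratelimit | 1);

	/* 32 pages at 1280 pages/s -> ~25 jiffies (~25ms at HZ=1000) */
	printf("task_ratelimit=%lu pages/s, pause=%ld jiffies\n",
	       task_ratelimit, pause);
	return 0;
}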
@@ -800,7 +1195,7 @@ void set_page_dirty_balance(struct page *page, int page_mkwrite) | |||
800 | } | 1195 | } |
801 | } | 1196 | } |
802 | 1197 | ||
803 | static DEFINE_PER_CPU(unsigned long, bdp_ratelimits) = 0; | 1198 | static DEFINE_PER_CPU(int, bdp_ratelimits); |
804 | 1199 | ||
805 | /** | 1200 | /** |
806 | * balance_dirty_pages_ratelimited_nr - balance dirty memory state | 1201 | * balance_dirty_pages_ratelimited_nr - balance dirty memory state |
@@ -820,31 +1215,39 @@ void balance_dirty_pages_ratelimited_nr(struct address_space *mapping, | |||
820 | unsigned long nr_pages_dirtied) | 1215 | unsigned long nr_pages_dirtied) |
821 | { | 1216 | { |
822 | struct backing_dev_info *bdi = mapping->backing_dev_info; | 1217 | struct backing_dev_info *bdi = mapping->backing_dev_info; |
823 | unsigned long ratelimit; | 1218 | int ratelimit; |
824 | unsigned long *p; | 1219 | int *p; |
825 | 1220 | ||
826 | if (!bdi_cap_account_dirty(bdi)) | 1221 | if (!bdi_cap_account_dirty(bdi)) |
827 | return; | 1222 | return; |
828 | 1223 | ||
829 | ratelimit = ratelimit_pages; | 1224 | ratelimit = current->nr_dirtied_pause; |
830 | if (mapping->backing_dev_info->dirty_exceeded) | 1225 | if (bdi->dirty_exceeded) |
831 | ratelimit = 8; | 1226 | ratelimit = min(ratelimit, 32 >> (PAGE_SHIFT - 10)); |
1227 | |||
1228 | current->nr_dirtied += nr_pages_dirtied; | ||
832 | 1229 | ||
1230 | preempt_disable(); | ||
833 | /* | 1231 | /* |
834 | * Check the rate limiting. Also, we do not want to throttle real-time | 1232 | * This prevents one CPU from accumulating too many dirtied pages without |
835 | * tasks in balance_dirty_pages(). Period. | 1233 | * calling into balance_dirty_pages(), which can happen when there are |
1234 | * 1000+ tasks, all of which start dirtying pages at exactly the same | ||
1235 | * time, hence all honour a too large initial task->nr_dirtied_pause. | ||
836 | */ | 1236 | */ |
837 | preempt_disable(); | ||
838 | p = &__get_cpu_var(bdp_ratelimits); | 1237 | p = &__get_cpu_var(bdp_ratelimits); |
839 | *p += nr_pages_dirtied; | 1238 | if (unlikely(current->nr_dirtied >= ratelimit)) |
840 | if (unlikely(*p >= ratelimit)) { | ||
841 | ratelimit = sync_writeback_pages(*p); | ||
842 | *p = 0; | 1239 | *p = 0; |
843 | preempt_enable(); | 1240 | else { |
844 | balance_dirty_pages(mapping, ratelimit); | 1241 | *p += nr_pages_dirtied; |
845 | return; | 1242 | if (unlikely(*p >= ratelimit_pages)) { |
1243 | *p = 0; | ||
1244 | ratelimit = 0; | ||
1245 | } | ||
846 | } | 1246 | } |
847 | preempt_enable(); | 1247 | preempt_enable(); |
1248 | |||
1249 | if (unlikely(current->nr_dirtied >= ratelimit)) | ||
1250 | balance_dirty_pages(mapping, current->nr_dirtied); | ||
848 | } | 1251 | } |
849 | EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr); | 1252 | EXPORT_SYMBOL(balance_dirty_pages_ratelimited_nr); |
850 | 1253 | ||
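Two backstops in balance_dirty_pages_ratelimited_nr() are easy to miss. First, while the bdi is over its limits the per-task quota is capped at 32KB worth of pages; 32 >> (PAGE_SHIFT - 10) expresses 32KB in pages, i.e. 8 pages with 4KB pages. Second, the per-CPU bdp_ratelimits counter forces a balance_dirty_pages() call once any single CPU accumulates ratelimit_pages dirtied pages across all of its tasks, even if no individual task has reached its own quota. A sketch of the cap arithmetic, assuming a PAGE_SHIFT of 12:

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assumed: 4KB pages */

int main(void)
{
	int nr_dirtied_pause = 256;             /* per-task quota */
	int cap = 32 >> (PAGE_SHIFT - 10);      /* 32KB in pages = 8 */
	int ratelimit = nr_dirtied_pause < cap ? nr_dirtied_pause : cap;

	printf("quota while dirty_exceeded: %d pages\n", ratelimit);
	return 0;
}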
@@ -900,7 +1303,8 @@ void laptop_mode_timer_fn(unsigned long data) | |||
900 | * threshold | 1303 | * threshold |
901 | */ | 1304 | */ |
902 | if (bdi_has_dirty_io(&q->backing_dev_info)) | 1305 | if (bdi_has_dirty_io(&q->backing_dev_info)) |
903 | bdi_start_writeback(&q->backing_dev_info, nr_pages); | 1306 | bdi_start_writeback(&q->backing_dev_info, nr_pages, |
1307 | WB_REASON_LAPTOP_TIMER); | ||
904 | } | 1308 | } |
905 | 1309 | ||
906 | /* | 1310 | /* |
@@ -939,22 +1343,17 @@ void laptop_sync_completion(void) | |||
939 | * | 1343 | * |
940 | * Here we set ratelimit_pages to a level which ensures that when all CPUs are | 1344 | * Here we set ratelimit_pages to a level which ensures that when all CPUs are |
941 | * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory | 1345 | * dirtying in parallel, we cannot go more than 3% (1/32) over the dirty memory |
942 | * thresholds before writeback cuts in. | 1346 | * thresholds. |
943 | * | ||
944 | * But the limit should not be set too high. Because it also controls the | ||
945 | * amount of memory which the balance_dirty_pages() caller has to write back. | ||
946 | * If this is too large then the caller will block on the IO queue all the | ||
947 | * time. So limit it to four megabytes - the balance_dirty_pages() caller | ||
948 | * will write six megabyte chunks, max. | ||
949 | */ | 1347 | */ |
950 | 1348 | ||
951 | void writeback_set_ratelimit(void) | 1349 | void writeback_set_ratelimit(void) |
952 | { | 1350 | { |
953 | ratelimit_pages = vm_total_pages / (num_online_cpus() * 32); | 1351 | unsigned long background_thresh; |
1352 | unsigned long dirty_thresh; | ||
1353 | global_dirty_limits(&background_thresh, &dirty_thresh); | ||
1354 | ratelimit_pages = dirty_thresh / (num_online_cpus() * 32); | ||
954 | if (ratelimit_pages < 16) | 1355 | if (ratelimit_pages < 16) |
955 | ratelimit_pages = 16; | 1356 | ratelimit_pages = 16; |
956 | if (ratelimit_pages * PAGE_CACHE_SIZE > 4096 * 1024) | ||
957 | ratelimit_pages = (4096 * 1024) / PAGE_CACHE_SIZE; | ||
958 | } | 1357 | } |
959 | 1358 | ||
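A worked example of the new sizing: ratelimit_pages now scales with dirty_thresh instead of total memory, and the removed 4MB cap is no longer needed because callers merely sleep rather than write back a chunk themselves. With illustrative figures:

#include <stdio.h>

int main(void)
{
	unsigned long dirty_thresh = 400000;    /* pages, ~1.5GB at 4KB */
	unsigned long ncpus = 8;
	unsigned long ratelimit_pages = dirty_thresh / (ncpus * 32);

	if (ratelimit_pages < 16)
		ratelimit_pages = 16;

	/* worst case: every CPU runs ratelimit_pages past its last check */
	printf("ratelimit_pages=%lu, overshoot<=%lu pages (%.1f%%)\n",
	       ratelimit_pages, ncpus * ratelimit_pages,
	       100.0 * ncpus * ratelimit_pages / dirty_thresh);
	return 0;
}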
960 | static int __cpuinit | 1359 | static int __cpuinit |
@@ -1324,6 +1723,7 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) | |||
1324 | __inc_zone_page_state(page, NR_FILE_DIRTY); | 1723 | __inc_zone_page_state(page, NR_FILE_DIRTY); |
1325 | __inc_zone_page_state(page, NR_DIRTIED); | 1724 | __inc_zone_page_state(page, NR_DIRTIED); |
1326 | __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); | 1725 | __inc_bdi_stat(mapping->backing_dev_info, BDI_RECLAIMABLE); |
1726 | __inc_bdi_stat(mapping->backing_dev_info, BDI_DIRTIED); | ||
1327 | task_dirty_inc(current); | 1727 | task_dirty_inc(current); |
1328 | task_io_account_write(PAGE_CACHE_SIZE); | 1728 | task_io_account_write(PAGE_CACHE_SIZE); |
1329 | } | 1729 | } |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 132d1ddb2238..a1893c050795 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -2266,7 +2266,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
2266 | */ | 2266 | */ |
2267 | writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; | 2267 | writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2; |
2268 | if (total_scanned > writeback_threshold) { | 2268 | if (total_scanned > writeback_threshold) { |
2269 | wakeup_flusher_threads(laptop_mode ? 0 : total_scanned); | 2269 | wakeup_flusher_threads(laptop_mode ? 0 : total_scanned, |
2270 | WB_REASON_TRY_TO_FREE_PAGES); | ||
2270 | sc->may_writepage = 1; | 2271 | sc->may_writepage = 1; |
2271 | } | 2272 | } |
2272 | 2273 | ||