Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--	fs/fs-writeback.c	84
1 file changed, 57 insertions(+), 27 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 04cf3b91e501..73c3992b2bb4 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -41,11 +41,23 @@ struct wb_writeback_work {
 	unsigned int for_kupdate:1;
 	unsigned int range_cyclic:1;
 	unsigned int for_background:1;
+	enum wb_reason reason;		/* why was writeback initiated? */
 
 	struct list_head list;		/* pending work list */
 	struct completion *done;	/* set if the caller waits */
 };
 
+const char *wb_reason_name[] = {
+	[WB_REASON_BACKGROUND]		= "background",
+	[WB_REASON_TRY_TO_FREE_PAGES]	= "try_to_free_pages",
+	[WB_REASON_SYNC]		= "sync",
+	[WB_REASON_PERIODIC]		= "periodic",
+	[WB_REASON_LAPTOP_TIMER]	= "laptop_timer",
+	[WB_REASON_FREE_MORE_MEM]	= "free_more_memory",
+	[WB_REASON_FS_FREE_SPACE]	= "fs_free_space",
+	[WB_REASON_FORKER_THREAD]	= "forker_thread"
+};
+
 /*
  * Include the creation of the trace points after defining the
  * wb_writeback_work structure so that the definition remains local to this
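The wb_reason_name[] table above maps each writeback reason to a string for tracing. The enum it indexes is introduced outside this file (the diffstat here is limited to fs/fs-writeback.c); a minimal sketch of its presumed shape in include/linux/writeback.h, with the ordering inferred from the designated initializers above:

	/*
	 * Sketch only: the enumerator order is inferred from the
	 * wb_reason_name[] initializers above; the real header may
	 * order or extend it differently.
	 */
	enum wb_reason {
		WB_REASON_BACKGROUND,
		WB_REASON_TRY_TO_FREE_PAGES,
		WB_REASON_SYNC,
		WB_REASON_PERIODIC,
		WB_REASON_LAPTOP_TIMER,
		WB_REASON_FREE_MORE_MEM,
		WB_REASON_FS_FREE_SPACE,
		WB_REASON_FORKER_THREAD,

		WB_REASON_MAX,	/* hypothetical terminator, not shown in this diff */
	};

Because the designated initializers name every enumerator, the table stays correct even if the enum is later reordered.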
@@ -115,7 +127,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 
 static void
 __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
-		      bool range_cyclic)
+		      bool range_cyclic, enum wb_reason reason)
 {
 	struct wb_writeback_work *work;
 
@@ -135,6 +147,7 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
 	work->sync_mode = WB_SYNC_NONE;
 	work->nr_pages = nr_pages;
 	work->range_cyclic = range_cyclic;
+	work->reason = reason;
 
 	bdi_queue_work(bdi, work);
 }
@@ -150,9 +163,10 @@ __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
  * completion. Caller need not hold sb s_umount semaphore.
  *
  */
-void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages)
+void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
+			 enum wb_reason reason)
 {
-	__bdi_start_writeback(bdi, nr_pages, true);
+	__bdi_start_writeback(bdi, nr_pages, true, reason);
 }
 
 /**
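Every exported entry point now threads a reason down into the work item, so the motive survives all the way to the tracepoints. As a hedged illustration of a call site (the callers live outside this diff; the laptop-mode timer in mm/page-writeback.c is the natural user of WB_REASON_LAPTOP_TIMER):

	/* Hedged sketch of a caller, assuming the laptop-mode timer path: */
	void laptop_mode_timer_fn(unsigned long data)
	{
		struct request_queue *q = (struct request_queue *)data;
		int nr_pages = global_page_state(NR_FILE_DIRTY) +
			       global_page_state(NR_UNSTABLE_NFS);

		/*
		 * The wakeup is now tagged with why it happened; the reason
		 * rides in struct wb_writeback_work down to the flusher.
		 */
		bdi_start_writeback(&q->backing_dev_info, nr_pages,
				    WB_REASON_LAPTOP_TIMER);
	}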
@@ -251,7 +265,7 @@ static bool inode_dirtied_after(struct inode *inode, unsigned long t)
  */
 static int move_expired_inodes(struct list_head *delaying_queue,
 			       struct list_head *dispatch_queue,
-			       unsigned long *older_than_this)
+			       struct wb_writeback_work *work)
 {
 	LIST_HEAD(tmp);
 	struct list_head *pos, *node;
@@ -262,8 +276,8 @@ static int move_expired_inodes(struct list_head *delaying_queue,
 
 	while (!list_empty(delaying_queue)) {
 		inode = wb_inode(delaying_queue->prev);
-		if (older_than_this &&
-		    inode_dirtied_after(inode, *older_than_this))
+		if (work->older_than_this &&
+		    inode_dirtied_after(inode, *work->older_than_this))
 			break;
 		if (sb && sb != inode->i_sb)
 			do_sb_sort = 1;
@@ -302,13 +316,13 @@ out:
  *                            |
  *                            +--> dequeue for IO
  */
-static void queue_io(struct bdi_writeback *wb, unsigned long *older_than_this)
+static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
 {
 	int moved;
 	assert_spin_locked(&wb->list_lock);
 	list_splice_init(&wb->b_more_io, &wb->b_io);
-	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, older_than_this);
-	trace_writeback_queue_io(wb, older_than_this, moved);
+	moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
+	trace_writeback_queue_io(wb, work, moved);
 }
 
 static int write_inode(struct inode *inode, struct writeback_control *wbc)
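Passing the whole work item to queue_io() and its tracepoint, rather than just the older_than_this pointer, lets trace_writeback_queue_io() report the reason alongside the expiry cutoff. Since struct wb_writeback_work is deliberately file-local, any consumer of the reason string would also sit in this file; a hypothetical debug helper (not part of the patch) showing the wb_reason_name[] lookup:

	/* Hypothetical helper: render a work item's reason for a debug message. */
	static void wb_debug_work(struct bdi_writeback *wb,
				  struct wb_writeback_work *work)
	{
		pr_debug("bdi %s: writeback for '%s', %ld pages to go\n",
			 dev_name(wb->bdi->dev),
			 wb_reason_name[work->reason],
			 work->nr_pages);
	}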
@@ -641,31 +655,40 @@ static long __writeback_inodes_wb(struct bdi_writeback *wb,
 	return wrote;
 }
 
-long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages)
+long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
+			 enum wb_reason reason)
 {
 	struct wb_writeback_work work = {
 		.nr_pages = nr_pages,
 		.sync_mode = WB_SYNC_NONE,
 		.range_cyclic = 1,
+		.reason = reason,
 	};
 
 	spin_lock(&wb->list_lock);
 	if (list_empty(&wb->b_io))
-		queue_io(wb, NULL);
+		queue_io(wb, &work);
 	__writeback_inodes_wb(wb, &work);
 	spin_unlock(&wb->list_lock);
 
 	return nr_pages - work.nr_pages;
 }
 
-static inline bool over_bground_thresh(void)
+static bool over_bground_thresh(struct backing_dev_info *bdi)
 {
 	unsigned long background_thresh, dirty_thresh;
 
 	global_dirty_limits(&background_thresh, &dirty_thresh);
 
-	return (global_page_state(NR_FILE_DIRTY) +
-		global_page_state(NR_UNSTABLE_NFS) > background_thresh);
+	if (global_page_state(NR_FILE_DIRTY) +
+	    global_page_state(NR_UNSTABLE_NFS) > background_thresh)
+		return true;
+
+	if (bdi_stat(bdi, BDI_RECLAIMABLE) >
+	    bdi_dirty_limit(bdi, background_thresh))
+		return true;
+
+	return false;
 }
 
 /*
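over_bground_thresh() also gains a second, per-device trigger: background writeback now starts when this bdi alone holds more reclaimable pages than its proportional share of the background threshold, even while the global counters remain under the limit, so one dirty device can no longer hide behind a quiet global state. bdi_dirty_limit() (in mm/page-writeback.c, not shown here) is assumed to compute that share from the device's measured writeout fraction, roughly:

	/*
	 * Hedged sketch of the split bdi_dirty_limit() is assumed to
	 * perform; the real helper also clamps the result by
	 * bdi->min_ratio/bdi->max_ratio, and bdi_writeout_fraction()
	 * is static to mm/page-writeback.c.
	 *
	 *	bdi_share ~= thresh * (bdi writeout events / total events)
	 */
	static unsigned long sketch_bdi_share(struct backing_dev_info *bdi,
					      unsigned long thresh)
	{
		long numerator, denominator;
		u64 share;

		bdi_writeout_fraction(bdi, &numerator, &denominator);
		share = (u64)thresh * numerator;
		do_div(share, denominator);
		return (unsigned long)share;
	}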
@@ -675,7 +698,7 @@ static inline bool over_bground_thresh(void)
 static void wb_update_bandwidth(struct bdi_writeback *wb,
 				unsigned long start_time)
 {
-	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, start_time);
+	__bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
 }
 
 /*
@@ -727,7 +750,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 		 * For background writeout, stop when we are below the
 		 * background dirty threshold
 		 */
-		if (work->for_background && !over_bground_thresh())
+		if (work->for_background && !over_bground_thresh(wb->bdi))
 			break;
 
 		if (work->for_kupdate) {
@@ -738,7 +761,7 @@ static long wb_writeback(struct bdi_writeback *wb,
 
 		trace_writeback_start(wb->bdi, work);
 		if (list_empty(&wb->b_io))
-			queue_io(wb, work->older_than_this);
+			queue_io(wb, work);
 		if (work->sb)
 			progress = writeback_sb_inodes(work->sb, wb, work);
 		else
@@ -811,13 +834,14 @@ static unsigned long get_nr_dirty_pages(void)
 
 static long wb_check_background_flush(struct bdi_writeback *wb)
 {
-	if (over_bground_thresh()) {
+	if (over_bground_thresh(wb->bdi)) {
 
 		struct wb_writeback_work work = {
 			.nr_pages = LONG_MAX,
 			.sync_mode = WB_SYNC_NONE,
 			.for_background = 1,
 			.range_cyclic = 1,
+			.reason = WB_REASON_BACKGROUND,
 		};
 
 		return wb_writeback(wb, &work);
@@ -851,6 +875,7 @@ static long wb_check_old_data_flush(struct bdi_writeback *wb)
 		.sync_mode = WB_SYNC_NONE,
 		.for_kupdate = 1,
 		.range_cyclic = 1,
+		.reason = WB_REASON_PERIODIC,
 	};
 
 	return wb_writeback(wb, &work);
@@ -969,7 +994,7 @@ int bdi_writeback_thread(void *data)
  * Start writeback of `nr_pages' pages. If `nr_pages' is zero, write back
  * the whole world.
  */
-void wakeup_flusher_threads(long nr_pages)
+void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
 {
 	struct backing_dev_info *bdi;
 
@@ -982,7 +1007,7 @@ void wakeup_flusher_threads(long nr_pages)
 	list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
 		if (!bdi_has_dirty_io(bdi))
 			continue;
-		__bdi_start_writeback(bdi, nr_pages, false);
+		__bdi_start_writeback(bdi, nr_pages, false, reason);
 	}
 	rcu_read_unlock();
 }
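wakeup_flusher_threads() now propagates its caller's reason to every bdi it kicks. A hedged sketch of the presumed reclaim-side call site (direct reclaim is the obvious user of WB_REASON_TRY_TO_FREE_PAGES; the variables below belong to that caller's scope):

	/* Hedged sketch of the vmscan wakeup point in do_try_to_free_pages(): */
	if (total_scanned > writeback_threshold) {
		wakeup_flusher_threads(laptop_mode ? 0 : total_scanned,
				       WB_REASON_TRY_TO_FREE_PAGES);
		sc->may_writepage = 1;
	}

Similarly, the sys_sync() path presumably passes (0, WB_REASON_SYNC) to write back the whole world.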
@@ -1203,7 +1228,9 @@ static void wait_sb_inodes(struct super_block *sb)
  * on how many (if any) will be written, and this function does not wait
  * for IO completion of submitted IO.
  */
-void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
+void writeback_inodes_sb_nr(struct super_block *sb,
+			    unsigned long nr,
+			    enum wb_reason reason)
 {
 	DECLARE_COMPLETION_ONSTACK(done);
 	struct wb_writeback_work work = {
@@ -1212,6 +1239,7 @@ void writeback_inodes_sb_nr(struct super_block *sb, unsigned long nr)
 		.tagged_writepages = 1,
 		.done = &done,
 		.nr_pages = nr,
+		.reason = reason,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));
@@ -1228,9 +1256,9 @@ EXPORT_SYMBOL(writeback_inodes_sb_nr);
  * on how many (if any) will be written, and this function does not wait
  * for IO completion of submitted IO.
  */
-void writeback_inodes_sb(struct super_block *sb)
+void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
 {
-	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages());
+	return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
 }
 EXPORT_SYMBOL(writeback_inodes_sb);
 
@@ -1241,11 +1269,11 @@ EXPORT_SYMBOL(writeback_inodes_sb);
  * Invoke writeback_inodes_sb if no writeback is currently underway.
  * Returns 1 if writeback was started, 0 if not.
  */
-int writeback_inodes_sb_if_idle(struct super_block *sb)
+int writeback_inodes_sb_if_idle(struct super_block *sb, enum wb_reason reason)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
 		down_read(&sb->s_umount);
-		writeback_inodes_sb(sb);
+		writeback_inodes_sb(sb, reason);
 		up_read(&sb->s_umount);
 		return 1;
 	} else
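The *_if_idle variants let a filesystem opportunistically flush dirty data, typically to win back free space; with the new argument that writeback is now attributable to the filesystem. A hypothetical caller (WB_REASON_FS_FREE_SPACE exists for exactly this case, per the name table above; "myfs" is illustrative only):

	/* Hypothetical caller: a filesystem low on space flushes dirty data. */
	static void myfs_reclaim_space(struct super_block *sb)
	{
		if (!writeback_inodes_sb_if_idle(sb, WB_REASON_FS_FREE_SPACE))
			pr_debug("myfs: flusher already busy, not queueing more\n");
	}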
@@ -1262,11 +1290,12 @@ EXPORT_SYMBOL(writeback_inodes_sb_if_idle);
  * Returns 1 if writeback was started, 0 if not.
  */
 int writeback_inodes_sb_nr_if_idle(struct super_block *sb,
-				   unsigned long nr)
+				   unsigned long nr,
+				   enum wb_reason reason)
 {
 	if (!writeback_in_progress(sb->s_bdi)) {
 		down_read(&sb->s_umount);
-		writeback_inodes_sb_nr(sb, nr);
+		writeback_inodes_sb_nr(sb, nr, reason);
 		up_read(&sb->s_umount);
 		return 1;
 	} else
@@ -1290,6 +1319,7 @@ void sync_inodes_sb(struct super_block *sb)
 		.nr_pages = LONG_MAX,
 		.range_cyclic = 0,
 		.done = &done,
+		.reason = WB_REASON_SYNC,
 	};
 
 	WARN_ON(!rwsem_is_locked(&sb->s_umount));