Diffstat (limited to 'mm/backing-dev.c')
-rw-r--r--  mm/backing-dev.c  | 82
 1 file changed, 52 insertions, 30 deletions
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 8290b1e88257..d6edf8d14f9c 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -45,6 +45,17 @@ static struct timer_list sync_supers_timer;
 static int bdi_sync_supers(void *);
 static void sync_supers_timer_fn(unsigned long);
 
+void bdi_lock_two(struct bdi_writeback *wb1, struct bdi_writeback *wb2)
+{
+	if (wb1 < wb2) {
+		spin_lock(&wb1->list_lock);
+		spin_lock_nested(&wb2->list_lock, 1);
+	} else {
+		spin_lock(&wb2->list_lock);
+		spin_lock_nested(&wb1->list_lock, 1);
+	}
+}
+
 #ifdef CONFIG_DEBUG_FS
 #include <linux/debugfs.h>
 #include <linux/seq_file.h>
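The new bdi_lock_two() helper takes both writeback list_locks in a fixed order, lowest struct address first, so that two paths locking the same pair of bdi_writeback structures with opposite argument order cannot ABBA-deadlock; spin_lock_nested(..., 1) tells lockdep the second lock is a deliberate second nesting level of the same lock class rather than a recursive acquisition. There is no unlock counterpart in this patch: callers drop both locks themselves, as bdi_destroy() does further down. A minimal userspace sketch of the same address-ordering idiom, using pthread mutexes instead of kernel spinlocks purely for illustration (struct obj, lock_two() and unlock_two() are made up for this example):

#include <pthread.h>
#include <stdio.h>

/* Two objects that each embed their own lock, like two bdi_writeback instances. */
struct obj {
	pthread_mutex_t lock;
	long value;
};

/*
 * Acquire both locks in a globally consistent order (lowest address first),
 * so threads that pass the same pair in opposite order can never deadlock.
 */
static void lock_two(struct obj *a, struct obj *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

static void unlock_two(struct obj *a, struct obj *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct obj x = { PTHREAD_MUTEX_INITIALIZER, 1 };
	struct obj y = { PTHREAD_MUTEX_INITIALIZER, 2 };

	/* Both argument orders take the locks in the same internal order. */
	lock_two(&x, &y);
	x.value += y.value;
	unlock_two(&x, &y);

	lock_two(&y, &x);
	printf("x.value = %ld\n", x.value);
	unlock_two(&y, &x);
	return 0;
}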
@@ -67,34 +78,42 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 	struct inode *inode;
 
 	nr_dirty = nr_io = nr_more_io = 0;
-	spin_lock(&inode_wb_list_lock);
+	spin_lock(&wb->list_lock);
 	list_for_each_entry(inode, &wb->b_dirty, i_wb_list)
 		nr_dirty++;
 	list_for_each_entry(inode, &wb->b_io, i_wb_list)
 		nr_io++;
 	list_for_each_entry(inode, &wb->b_more_io, i_wb_list)
 		nr_more_io++;
-	spin_unlock(&inode_wb_list_lock);
+	spin_unlock(&wb->list_lock);
 
 	global_dirty_limits(&background_thresh, &dirty_thresh);
 	bdi_thresh = bdi_dirty_limit(bdi, dirty_thresh);
 
 #define K(x) ((x) << (PAGE_SHIFT - 10))
 	seq_printf(m,
-		   "BdiWriteback: %8lu kB\n"
-		   "BdiReclaimable: %8lu kB\n"
-		   "BdiDirtyThresh: %8lu kB\n"
-		   "DirtyThresh: %8lu kB\n"
-		   "BackgroundThresh: %8lu kB\n"
-		   "b_dirty: %8lu\n"
-		   "b_io: %8lu\n"
-		   "b_more_io: %8lu\n"
-		   "bdi_list: %8u\n"
-		   "state: %8lx\n",
+		   "BdiWriteback: %10lu kB\n"
+		   "BdiReclaimable: %10lu kB\n"
+		   "BdiDirtyThresh: %10lu kB\n"
+		   "DirtyThresh: %10lu kB\n"
+		   "BackgroundThresh: %10lu kB\n"
+		   "BdiWritten: %10lu kB\n"
+		   "BdiWriteBandwidth: %10lu kBps\n"
+		   "b_dirty: %10lu\n"
+		   "b_io: %10lu\n"
+		   "b_more_io: %10lu\n"
+		   "bdi_list: %10u\n"
+		   "state: %10lx\n",
 		   (unsigned long) K(bdi_stat(bdi, BDI_WRITEBACK)),
 		   (unsigned long) K(bdi_stat(bdi, BDI_RECLAIMABLE)),
-		   K(bdi_thresh), K(dirty_thresh),
-		   K(background_thresh), nr_dirty, nr_io, nr_more_io,
+		   K(bdi_thresh),
+		   K(dirty_thresh),
+		   K(background_thresh),
+		   (unsigned long) K(bdi_stat(bdi, BDI_WRITTEN)),
+		   (unsigned long) K(bdi->write_bandwidth),
+		   nr_dirty,
+		   nr_io,
+		   nr_more_io,
 		   !list_empty(&bdi->bdi_list), bdi->state);
 #undef K
 
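The counters printed above are kept in pages; the K() macro converts pages to kilobytes by shifting left by (PAGE_SHIFT - 10), since a page is 2^PAGE_SHIFT bytes and a kilobyte is 2^10 bytes. A tiny standalone check of that conversion, assuming the common PAGE_SHIFT of 12 (4 KiB pages):

#include <stdio.h>

/* Assumed for illustration: 4 KiB pages, i.e. PAGE_SHIFT == 12. */
#define PAGE_SHIFT 12

/* Same conversion as the debugfs helper: pages -> kilobytes. */
#define K(x) ((x) << (PAGE_SHIFT - 10))

int main(void)
{
	unsigned long nr_pages = 300;	/* e.g. a BDI_WRITEBACK count */

	/* 300 pages * 4 KiB/page = 1200 kB */
	printf("%lu pages = %lu kB\n", nr_pages, (unsigned long)K(nr_pages));
	return 0;
}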
@@ -249,18 +268,6 @@ int bdi_has_dirty_io(struct backing_dev_info *bdi)
 	return wb_has_dirty_io(&bdi->wb);
 }
 
-static void bdi_flush_io(struct backing_dev_info *bdi)
-{
-	struct writeback_control wbc = {
-		.sync_mode = WB_SYNC_NONE,
-		.older_than_this = NULL,
-		.range_cyclic = 1,
-		.nr_to_write = 1024,
-	};
-
-	writeback_inodes_wb(&bdi->wb, &wbc);
-}
-
 /*
  * kupdated() used to do this. We cannot do it from the bdi_forker_thread()
  * or we risk deadlocking on ->s_umount. The longer term solution would be
@@ -446,9 +453,10 @@ static int bdi_forker_thread(void *ptr)
 			if (IS_ERR(task)) {
 				/*
 				 * If thread creation fails, force writeout of
-				 * the bdi from the thread.
+				 * the bdi from the thread. Hopefully 1024 is
+				 * large enough for efficient IO.
 				 */
-				bdi_flush_io(bdi);
+				writeback_inodes_wb(&bdi->wb, 1024);
 			} else {
 				/*
 				 * The spinlock makes sure we do not lose
@@ -629,9 +637,15 @@ static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 	INIT_LIST_HEAD(&wb->b_dirty);
 	INIT_LIST_HEAD(&wb->b_io);
 	INIT_LIST_HEAD(&wb->b_more_io);
+	spin_lock_init(&wb->list_lock);
 	setup_timer(&wb->wakeup_timer, wakeup_timer_fn, (unsigned long)bdi);
 }
 
+/*
+ * Initial write bandwidth: 100 MB/s
+ */
+#define INIT_BW (100 << (20 - PAGE_SHIFT))
+
 int bdi_init(struct backing_dev_info *bdi)
 {
 	int i, err;
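INIT_BW expresses the 100 MB/s starting estimate in pages per second: 100 MB is 100 << 20 bytes, and dividing by the 2^PAGE_SHIFT-byte page size leaves 100 << (20 - PAGE_SHIFT) pages. That is consistent with the debugfs hunk above, which prints K(bdi->write_bandwidth) in kBps. A quick check of the arithmetic, again assuming 4 KiB pages (PAGE_SHIFT == 12, an illustrative value):

#include <stdio.h>

/* Assumed for illustration: 4 KiB pages. */
#define PAGE_SHIFT 12

/* 100 MB/s expressed in pages per second, as in the patch. */
#define INIT_BW (100 << (20 - PAGE_SHIFT))

int main(void)
{
	/* 100 << 8 = 25600 pages/s; 25600 pages * 4 KiB = 102400 kB/s = 100 MiB/s */
	printf("INIT_BW = %d pages/s = %d kB/s\n",
	       INIT_BW, INIT_BW << (PAGE_SHIFT - 10));
	return 0;
}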
@@ -654,6 +668,13 @@ int bdi_init(struct backing_dev_info *bdi)
 	}
 
 	bdi->dirty_exceeded = 0;
+
+	bdi->bw_time_stamp = jiffies;
+	bdi->written_stamp = 0;
+
+	bdi->write_bandwidth = INIT_BW;
+	bdi->avg_write_bandwidth = INIT_BW;
+
 	err = prop_local_init_percpu(&bdi->completions);
 
 	if (err) {
@@ -677,11 +698,12 @@ void bdi_destroy(struct backing_dev_info *bdi)
 	if (bdi_has_dirty_io(bdi)) {
 		struct bdi_writeback *dst = &default_backing_dev_info.wb;
 
-		spin_lock(&inode_wb_list_lock);
+		bdi_lock_two(&bdi->wb, dst);
 		list_splice(&bdi->wb.b_dirty, &dst->b_dirty);
 		list_splice(&bdi->wb.b_io, &dst->b_io);
 		list_splice(&bdi->wb.b_more_io, &dst->b_more_io);
-		spin_unlock(&inode_wb_list_lock);
+		spin_unlock(&bdi->wb.list_lock);
+		spin_unlock(&dst->list_lock);
 	}
 
 	bdi_unregister(bdi);