path: root/fs/fs-writeback.c
author     Artem Bityutskiy <Artem.Bityutskiy@nokia.com>    2010-07-25 07:29:21 -0400
committer  Jens Axboe <jaxboe@fusionio.com>                 2010-08-07 12:53:56 -0400
commit     253c34e9b10c30d3064be654b5b78fbc1a8b1896
tree       1ee077a52fd1036fe26ddfe8cabd5a5315dc1285          /fs/fs-writeback.c
parent     fff5b85aa4225a7be157f208277a055822039a9e
writeback: prevent unnecessary bdi threads wakeups
Finally, we can get rid of unnecessary wake-ups in bdi threads, which are very bad for battery-driven devices.

Bdi threads perform two types of activity:

1. processing bdi works from 'bdi->work_list'
2. periodic write-back

So there are two sources of wake-up events for bdi threads:

1. 'bdi_queue_work()' - submits bdi works
2. '__mark_inode_dirty()' - adds dirty I/O to a bdi

The former already contains bdi wake-up code; the latter does not, and this patch adds it.

'__mark_inode_dirty()' is a hot-path function, and this patch adds another 'spin_lock(&bdi->wb_lock)' to it. However, the lock is taken only in the rare case where the bdi has no dirty inodes, so it should not affect performance.

This patch makes sure bdi threads and the forker thread do not wake up when there is nothing to do. The forker thread will nevertheless wake up at least every 5 minutes to check whether it has to kill a bdi thread; this could also be optimized, but is not worth it.

This patch also tidies up the warning about an unregistered bdi, replacing the hand-crafted WARN_ON()/printk pair with a simple 'WARN()' statement.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
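To make the wake-up logic concrete, here is a minimal userspace sketch (not kernel code) of the decision this patch introduces in the dirty path. 'struct fake_bdi', 'mark_inode_dirty()' and the printf "wake-ups" are illustrative stand-ins, not the kernel API; the point is that only the transition from "no dirty I/O" to "has dirty I/O" triggers a wake-up, and that a bdi thread killed for inactivity is replaced by waking the forker thread instead.

/*
 * Userspace sketch of the wake-up decision; fake_bdi and mark_inode_dirty()
 * are hypothetical stand-ins for the kernel structures and hooks.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_bdi {
	bool has_dirty_io;   /* models wb_has_dirty_io(&bdi->wb) */
	bool thread_alive;   /* models bdi->wb.task != NULL */
};

static void wakeup_bdi_thread(struct fake_bdi *bdi)
{
	/* In the kernel this check runs under bdi->wb_lock. */
	if (bdi->thread_alive)
		printf("wake bdi thread\n");
	else
		printf("wake forker thread to recreate the bdi thread\n");
}

static void mark_inode_dirty(struct fake_bdi *bdi)
{
	/* Only the transition from clean to dirty triggers a wake-up. */
	bool wakeup = !bdi->has_dirty_io;

	bdi->has_dirty_io = true;
	if (wakeup)
		wakeup_bdi_thread(bdi);
}

int main(void)
{
	struct fake_bdi bdi = { .has_dirty_io = false, .thread_alive = true };

	mark_inode_dirty(&bdi);  /* first dirty inode: wakes the bdi thread */
	mark_inode_dirty(&bdi);  /* bdi already has dirty I/O: no wake-up */
	return 0;
}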
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--    fs/fs-writeback.c    59
1 file changed, 48 insertions(+), 11 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 905f3ea38488..55f6e46e06f1 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -823,10 +823,16 @@ int bdi_writeback_thread(void *data)
 			continue;
 		}
 
-		if (dirty_writeback_interval)
+		if (wb_has_dirty_io(wb) && dirty_writeback_interval)
 			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
-		else
+		else {
+			/*
+			 * We have nothing to do, so can go sleep without any
+			 * timeout and save power. When a work is queued or
+			 * something is made dirty - we will be woken up.
+			 */
 			schedule();
+		}
 
 		try_to_freeze();
 	}
@@ -862,6 +868,26 @@ void wakeup_flusher_threads(long nr_pages)
 	rcu_read_unlock();
 }
 
+/*
+ * This function is used when the first inode for this bdi is marked dirty. It
+ * wakes-up the corresponding bdi thread which should then take care of the
+ * periodic background write-out of dirty inodes.
+ */
+static void wakeup_bdi_thread(struct backing_dev_info *bdi)
+{
+	spin_lock(&bdi->wb_lock);
+	if (bdi->wb.task)
+		wake_up_process(bdi->wb.task);
+	else
+		/*
+		 * When bdi tasks are inactive for long time, they are killed.
+		 * In this case we have to wake-up the forker thread which
+		 * should create and run the bdi thread.
+		 */
+		wake_up_process(default_backing_dev_info.wb.task);
+	spin_unlock(&bdi->wb_lock);
+}
+
 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 {
 	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
@@ -914,6 +940,8 @@ static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 void __mark_inode_dirty(struct inode *inode, int flags)
 {
 	struct super_block *sb = inode->i_sb;
+	struct backing_dev_info *bdi = NULL;
+	bool wakeup_bdi = false;
 
 	/*
 	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
@@ -967,22 +995,31 @@ void __mark_inode_dirty(struct inode *inode, int flags)
 		 * reposition it (that would break b_dirty time-ordering).
 		 */
 		if (!was_dirty) {
-			struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
-			struct backing_dev_info *bdi = wb->bdi;
-
-			if (bdi_cap_writeback_dirty(bdi) &&
-			    !test_bit(BDI_registered, &bdi->state)) {
-				WARN_ON(1);
-				printk(KERN_ERR "bdi-%s not registered\n",
-								bdi->name);
+			bdi = inode_to_bdi(inode);
+
+			if (bdi_cap_writeback_dirty(bdi)) {
+				WARN(!test_bit(BDI_registered, &bdi->state),
+				     "bdi-%s not registered\n", bdi->name);
+
+				/*
+				 * If this is the first dirty inode for this
+				 * bdi, we have to wake-up the corresponding
+				 * bdi thread to make sure background
+				 * write-back happens later.
+				 */
+				if (!wb_has_dirty_io(&bdi->wb))
+					wakeup_bdi = true;
 			}
 
 			inode->dirtied_when = jiffies;
-			list_move(&inode->i_list, &wb->b_dirty);
+			list_move(&inode->i_list, &bdi->wb.b_dirty);
 		}
 	}
 out:
 	spin_unlock(&inode_lock);
+
+	if (wakeup_bdi)
+		wakeup_bdi_thread(bdi);
 }
 EXPORT_SYMBOL(__mark_inode_dirty);
 