author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>    2010-07-25 07:29:22 -0400
committer Jens Axboe <jaxboe@fusionio.com>    2010-08-07 12:53:56 -0400
commit    6467716a37673e8d47b4984eb19839bdad0a8353
tree      8b2bfd38d53e31c47654162d4ce9220c4424a31d /fs/fs-writeback.c
parent    253c34e9b10c30d3064be654b5b78fbc1a8b1896
writeback: optimize periodic bdi thread wakeups
When the first inode for a bdi is marked dirty, we wake up the bdi thread,
which should take care of the periodic background write-out. However, the
write-out will actually start only 'dirty_writeback_interval' centisecs
later, so we can delay the wake-up.

This change was requested by Nick Piggin, who pointed out that delaying the
wake-up weeds out two unnecessary context switches, which matters because
'__mark_inode_dirty()' is a hot-path function.

This patch introduces a new function, 'bdi_wakeup_thread_delayed()', which
sets up a timer to wake up the bdi thread and returns, so the wake-up is
delayed. We also delete the timer in bdi threads just before writing back,
and delete it synchronously when unregistering the bdi. At the unregister
point the bdi has no users, so no one can arm the timer again.

Since we now take 'bdi->wb_lock' in the timer, which can execute in softirq
context, we have to use 'spin_lock_bh()' for 'bdi->wb_lock'. This patch
makes that change as well.

This patch also moves the 'bdi_wb_init()' function down in the file to
avoid a forward declaration of 'bdi_wakeup_thread_delayed()'.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
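For reference, 'bdi_wakeup_thread_delayed()' itself is added in
mm/backing-dev.c, which is outside this diffstat. A minimal sketch of the
delayed wake-up described above, assuming a 'wakeup_timer' in 'struct
bdi_writeback' (the field the diff below deletes with 'del_timer()') and
the 2010-era timer API, could look like:

void bdi_wakeup_thread_delayed(struct backing_dev_info *bdi)
{
	unsigned long timeout;

	/* dirty_writeback_interval is in centisecs; convert to jiffies */
	timeout = msecs_to_jiffies(dirty_writeback_interval * 10);
	/* (Re)arm the timer and return; the actual wake-up happens later,
	 * in the timer callback, which runs in softirq context */
	mod_timer(&bdi->wb.wakeup_timer, jiffies + timeout);
}

The timer callback would then perform the wake-up that the removed
'wakeup_bdi_thread()' (see below) used to do synchronously, taking
'bdi->wb_lock' from softirq context.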
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--  fs/fs-writeback.c  36
1 file changed, 11 insertions(+), 25 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 55f6e46e06f1..bfa2df2c7ce2 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -76,7 +76,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 {
 	trace_writeback_queue(bdi, work);
 
-	spin_lock(&bdi->wb_lock);
+	spin_lock_bh(&bdi->wb_lock);
 	list_add_tail(&work->list, &bdi->work_list);
 	if (bdi->wb.task) {
 		wake_up_process(bdi->wb.task);
@@ -88,7 +88,7 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 		trace_writeback_nothread(bdi, work);
 		wake_up_process(default_backing_dev_info.wb.task);
 	}
-	spin_unlock(&bdi->wb_lock);
+	spin_unlock_bh(&bdi->wb_lock);
 }
 
 static void
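The '_bh' conversions in this and the following hunks guard against a
self-deadlock: the new wake-up timer takes 'bdi->wb_lock' from softirq
context, so process-context holders must keep bottom halves disabled while
holding the lock. A hypothetical stand-alone illustration of the pattern
(the 'demo_*' names are invented for this sketch):

#include <linux/spinlock.h>
#include <linux/timer.h>

static DEFINE_SPINLOCK(demo_lock);

/* Timer callback: runs in softirq context and takes the shared lock. */
static void demo_timer_fn(unsigned long data)
{
	spin_lock(&demo_lock);
	/* ... touch state shared with process context ... */
	spin_unlock(&demo_lock);
}

/* Process context: a plain spin_lock() here could deadlock if the timer
 * softirq fired on this CPU while the lock is held, so bottom halves are
 * disabled for the critical section. */
static void demo_update(void)
{
	spin_lock_bh(&demo_lock);
	/* ... touch the shared state ... */
	spin_unlock_bh(&demo_lock);
}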
@@ -704,13 +704,13 @@ get_next_work_item(struct backing_dev_info *bdi)
 {
 	struct wb_writeback_work *work = NULL;
 
-	spin_lock(&bdi->wb_lock);
+	spin_lock_bh(&bdi->wb_lock);
 	if (!list_empty(&bdi->work_list)) {
 		work = list_entry(bdi->work_list.next,
 				  struct wb_writeback_work, list);
 		list_del_init(&work->list);
 	}
-	spin_unlock(&bdi->wb_lock);
+	spin_unlock_bh(&bdi->wb_lock);
 	return work;
 }
 
@@ -810,6 +810,12 @@ int bdi_writeback_thread(void *data)
 	trace_writeback_thread_start(bdi);
 
 	while (!kthread_should_stop()) {
+		/*
+		 * Remove own delayed wake-up timer, since we are already awake
+		 * and we'll take care of the periodic write-back.
+		 */
+		del_timer(&wb->wakeup_timer);
+
 		pages_written = wb_do_writeback(wb, 0);
 
 		trace_writeback_pages_written(pages_written);
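A plain 'del_timer()' is enough here: if the callback is already running,
it merely wakes a thread that is awake anyway. The synchronous deletion
the commit message mentions happens at unregister time in mm/backing-dev.c
(not shown in this diff), presumably along the lines of:

	/* In bdi_unregister(): the bdi has no users left, so nothing can
	 * re-arm the timer after this returns */
	del_timer_sync(&bdi->wb.wakeup_timer);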
@@ -868,26 +874,6 @@ void wakeup_flusher_threads(long nr_pages)
 	rcu_read_unlock();
 }
 
-/*
- * This function is used when the first inode for this bdi is marked dirty. It
- * wakes-up the corresponding bdi thread which should then take care of the
- * periodic background write-out of dirty inodes.
- */
-static void wakeup_bdi_thread(struct backing_dev_info *bdi)
-{
-	spin_lock(&bdi->wb_lock);
-	if (bdi->wb.task)
-		wake_up_process(bdi->wb.task);
-	else
-		/*
-		 * When bdi tasks are inactive for long time, they are killed.
-		 * In this case we have to wake-up the forker thread which
-		 * should create and run the bdi thread.
-		 */
-		wake_up_process(default_backing_dev_info.wb.task);
-	spin_unlock(&bdi->wb_lock);
-}
-
 static noinline void block_dump___mark_inode_dirty(struct inode *inode)
 {
 	if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
@@ -1019,7 +1005,7 @@ out:
 	spin_unlock(&inode_lock);
 
 	if (wakeup_bdi)
-		wakeup_bdi_thread(bdi);
+		bdi_wakeup_thread_delayed(bdi);
 }
 EXPORT_SYMBOL(__mark_inode_dirty);
 