author    Artem Bityutskiy <Artem.Bityutskiy@nokia.com>    2010-07-25 07:29:20 -0400
committer Jens Axboe <jaxboe@fusionio.com>    2010-08-07 12:53:56 -0400
commit    fff5b85aa4225a7be157f208277a055822039a9e (patch)
tree      f4310bf188ef0a1dac52da39b61968aa566a717e /fs/fs-writeback.c
parent    adf392407076b85816d48714fb8eeaedb2157884 (diff)
writeback: move bdi threads exiting logic to the forker thread
Currently, bdi threads can decide to exit if there were no useful activities for 5 minutes. However, this causes nasty races: we can easily oops in 'bdi_queue_work()' if the bdi thread decides to exit while we are waking it up. And even if we do not oops, but the bdi thread exits immediately after we wake it up, we'd lose the wake-up event and have an unnecessary delay (up to 5 secs) in the bdi work processing.

This patch makes the forker thread the central place which not only creates bdi threads, but also kills them if they have been inactive long enough. This is better design-wise.

Another reason for this change is to prepare for further changes which will prevent bdi threads from waking up every 5 seconds and wasting power. Indeed, once a task no longer wakes up periodically, it cannot decide to exit on its own either.

This patch also moves the 'wake_up_bit()' call from the bdi thread to the forker thread. So now the forker thread sets the BDI_pending bit, then forks the task or kills it, then clears the bit and wakes up the waiting process. The only process which may wait on the bit is 'bdi_wb_shutdown()'. This function was changed as well: now it first removes the bdi from 'bdi_list', then waits on the 'BDI_pending' bit. Once it wakes up, it is guaranteed that the forker thread won't race with it, because the bdi is no longer visible.

Note, the forker thread sets the 'BDI_pending' bit under 'bdi->wb_lock', which is essential for proper serialization. Additionally, when we change 'bdi->wb.task', we now take 'bdi->wb_lock', to make sure that we do not lose wake-ups which we otherwise would if we raced with, say, 'bdi_queue_work()'.

Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy@nokia.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
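For reference, here is a minimal sketch of the forker-side protocol the message describes. This is illustrative only, not the actual mm/backing-dev.c implementation; 'forker_fork_or_kill()' and 'thread_was_idle_too_long()' are hypothetical names used for this sketch.

#include <linux/kthread.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/err.h>
#include <linux/backing-dev.h>

/* Sketch only: how the forker thread can create and kill bdi threads while
 * staying race-free against 'bdi_queue_work()' and 'bdi_wb_shutdown()'. */
static void forker_fork_or_kill(struct backing_dev_info *bdi)
{
	struct task_struct *task;

	/* Setting BDI_pending under bdi->wb_lock is what serializes us
	 * against 'bdi_wb_shutdown()'. */
	spin_lock(&bdi->wb_lock);
	set_bit(BDI_pending, &bdi->state);
	task = bdi->wb.task;	/* NULL if the thread does not exist yet */
	spin_unlock(&bdi->wb_lock);

	if (!task) {
		task = kthread_run(bdi_writeback_thread, &bdi->wb,
				   "flush-%s", dev_name(bdi->dev));
		/* 'bdi->wb.task' changes only under 'bdi->wb_lock', so a
		 * concurrent 'bdi_queue_work()' cannot lose its wake-up. */
		spin_lock(&bdi->wb_lock);
		bdi->wb.task = IS_ERR(task) ? NULL : task;
		spin_unlock(&bdi->wb_lock);
	} else if (thread_was_idle_too_long(&bdi->wb)) {	/* hypothetical */
		spin_lock(&bdi->wb_lock);
		bdi->wb.task = NULL;
		spin_unlock(&bdi->wb_lock);
		kthread_stop(task);
	}

	/* Clear the pending bit and wake anybody waiting to tear us down. */
	clear_bit(BDI_pending, &bdi->state);
	smp_mb__after_clear_bit();
	wake_up_bit(&bdi->state, BDI_pending);
}

On the teardown side, 'bdi_wb_shutdown()' (in mm/backing-dev.c, not part of this diff) removes the bdi from 'bdi_list' before waiting on 'BDI_pending', so once the waiter is woken the forker thread can no longer find the bdi and race with the teardown.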
Diffstat (limited to 'fs/fs-writeback.c')
-rw-r--r--  fs/fs-writeback.c  54
1 file changed, 12 insertions(+), 42 deletions(-)
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 9f5cab75c15..905f3ea3848 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -78,21 +78,17 @@ static void bdi_queue_work(struct backing_dev_info *bdi,
 
 	spin_lock(&bdi->wb_lock);
 	list_add_tail(&work->list, &bdi->work_list);
-	spin_unlock(&bdi->wb_lock);
-
-	/*
-	 * If the default thread isn't there, make sure we add it. When
-	 * it gets created and wakes up, we'll run this work.
-	 */
-	if (unlikely(!bdi->wb.task)) {
-		trace_writeback_nothread(bdi, work);
-		wake_up_process(default_backing_dev_info.wb.task);
-	} else {
-		struct bdi_writeback *wb = &bdi->wb;
-
-		if (wb->task)
-			wake_up_process(wb->task);
+	if (bdi->wb.task) {
+		wake_up_process(bdi->wb.task);
+	} else {
+		/*
+		 * The bdi thread isn't there, wake up the forker thread which
+		 * will create and run it.
+		 */
+		trace_writeback_nothread(bdi, work);
+		wake_up_process(default_backing_dev_info.wb.task);
 	}
+	spin_unlock(&bdi->wb_lock);
 }
 
 static void
@@ -800,7 +796,6 @@ int bdi_writeback_thread(void *data)
 {
 	struct bdi_writeback *wb = data;
 	struct backing_dev_info *bdi = wb->bdi;
-	unsigned long wait_jiffies = -1UL;
 	long pages_written;
 
 	current->flags |= PF_FLUSHER | PF_SWAPWRITE;
@@ -812,13 +807,6 @@ int bdi_writeback_thread(void *data)
 	 */
 	set_user_nice(current, 0);
 
-	/*
-	 * Clear pending bit and wakeup anybody waiting to tear us down
-	 */
-	clear_bit(BDI_pending, &bdi->state);
-	smp_mb__after_clear_bit();
-	wake_up_bit(&bdi->state, BDI_pending);
-
 	trace_writeback_thread_start(bdi);
 
 	while (!kthread_should_stop()) {
@@ -828,18 +816,6 @@ int bdi_writeback_thread(void *data)
 
 		if (pages_written)
 			wb->last_active = jiffies;
-		else if (wait_jiffies != -1UL) {
-			unsigned long max_idle;
-
-			/*
-			 * Longest period of inactivity that we tolerate. If we
-			 * see dirty data again later, the thread will get
-			 * recreated automatically.
-			 */
-			max_idle = max(5UL * 60 * HZ, wait_jiffies);
-			if (time_after(jiffies, max_idle + wb->last_active))
-				break;
-		}
 
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!list_empty(&bdi->work_list)) {
@@ -847,21 +823,15 @@ int bdi_writeback_thread(void *data)
 			continue;
 		}
 
-		if (dirty_writeback_interval) {
-			wait_jiffies = msecs_to_jiffies(dirty_writeback_interval * 10);
-			schedule_timeout(wait_jiffies);
-		} else
+		if (dirty_writeback_interval)
+			schedule_timeout(msecs_to_jiffies(dirty_writeback_interval * 10));
+		else
 			schedule();
 
 		try_to_freeze();
 	}
 
-	wb->task = NULL;
-
-	/*
-	 * Flush any work that raced with us exiting. No new work
-	 * will be added, since this bdi isn't discoverable anymore.
-	 */
+	/* Flush any work that raced with us exiting */
 	if (!list_empty(&bdi->work_list))
 		wb_do_writeback(wb, 1);
 