author     Chris Mason <chris.mason@oracle.com>  2009-02-04 09:23:24 -0500
committer  Chris Mason <chris.mason@oracle.com>  2009-02-04 09:23:24 -0500
commit     b51912c91fcf7581cc7b4550f1bb96422809d9ed (patch)
tree       3738e358e75088d300b289e013beccffe0af8e05
parent     0279b4cd86685b5eea467c1b74ce94f0add2c0a3 (diff)
Btrfs: async threads should try harder to find work
Tracing shows that the delay between an async thread going to sleep and more
work being added is often very short. This commit adds a small delay and some
extra checking to the code right before we schedule out. It allows more work
to be added to the worker without requiring notifications from other procs.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
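The pattern is easier to study outside the kernel. Below is a minimal
userspace sketch of the same idea using pthreads; it is an analogue, not the
btrfs code: the integer pending counter stands in for the worker->pending
list, sched_yield() stands in for cpu_relax()/schedule_timeout(1), and
queue_work()/worker_loop() here are hypothetical names. What it demonstrates
is the same trick: the worker pauses and re-checks before sleeping for real,
so producers only pay for a wakeup when working is 0.

/* try-harder worker: pause and re-check before sleeping for real */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

struct worker {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int pending;	/* stand-in for the worker->pending list */
	int working;	/* 1: worker promises to re-check on its own */
	int stop;
};

static void *worker_loop(void *arg)
{
	struct worker *w = arg;

	pthread_mutex_lock(&w->lock);
	for (;;) {
		while (w->pending > 0) {
			w->pending--;
			pthread_mutex_unlock(&w->lock);
			/* ... run the work item without the lock ... */
			pthread_mutex_lock(&w->lock);
		}
		if (w->stop)
			break;

		/* drop the lock and pause briefly: the analogue of
		 * cpu_relax() + schedule_timeout(1) in the patch */
		pthread_mutex_unlock(&w->lock);
		sched_yield();
		pthread_mutex_lock(&w->lock);
		if (w->pending > 0)
			continue;	/* work trickled in while we paused */

		/* still nothing: sleep for real and ask to be woken */
		w->working = 0;
		while (w->pending == 0 && !w->stop)
			pthread_cond_wait(&w->cond, &w->lock);
		w->working = 1;
	}
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

static void queue_work(struct worker *w)
{
	pthread_mutex_lock(&w->lock);
	w->pending++;
	if (!w->working) {	/* wakeup needed only if the worker sleeps */
		w->working = 1;
		pthread_cond_signal(&w->cond);
	}
	pthread_mutex_unlock(&w->lock);
}

int main(void)
{
	struct worker w = { PTHREAD_MUTEX_INITIALIZER,
			    PTHREAD_COND_INITIALIZER, 0, 1, 0 };
	pthread_t t;
	int i;

	pthread_create(&t, NULL, worker_loop, &w);
	for (i = 0; i < 1000; i++)
		queue_work(&w);

	pthread_mutex_lock(&w.lock);
	w.stop = 1;
	pthread_cond_signal(&w.cond);
	pthread_mutex_unlock(&w.lock);
	pthread_join(t, NULL);
	printf("all work consumed\n");
	return 0;
}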
-rw-r--r--  fs/btrfs/async-thread.c  50
-rw-r--r--  fs/btrfs/disk-io.c        2
2 files changed, 46 insertions(+), 6 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index f2e80f3768ec..c84ca1f5259a 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -19,7 +19,8 @@
 #include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-# include <linux/freezer.h>
+#include <linux/freezer.h>
+#include <linux/ftrace.h>
 #include "async-thread.h"
 
 #define WORK_QUEUED_BIT 0
@@ -142,6 +143,7 @@ static int worker_loop(void *arg)
 	struct btrfs_work *work;
 	do {
 		spin_lock_irq(&worker->lock);
+again_locked:
 		while (!list_empty(&worker->pending)) {
 			cur = worker->pending.next;
 			work = list_entry(cur, struct btrfs_work, list);
@@ -164,14 +166,50 @@ static int worker_loop(void *arg)
 			check_idle_worker(worker);
 
 		}
-		worker->working = 0;
 		if (freezing(current)) {
+			worker->working = 0;
+			spin_unlock_irq(&worker->lock);
 			refrigerator();
 		} else {
-			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&worker->lock);
-			if (!kthread_should_stop())
+			if (!kthread_should_stop()) {
+				cpu_relax();
+				/*
+				 * we've dropped the lock, did someone else
+				 * jump_in?
+				 */
+				smp_mb();
+				if (!list_empty(&worker->pending))
+					continue;
+
+				/*
+				 * this short schedule allows more work to
+				 * come in without the queue functions
+				 * needing to go through wake_up_process()
+				 *
+				 * worker->working is still 1, so nobody
+				 * is going to try and wake us up
+				 */
+				schedule_timeout(1);
+				smp_mb();
+				if (!list_empty(&worker->pending))
+					continue;
+
+				/* still no more work?, sleep for real */
+				spin_lock_irq(&worker->lock);
+				set_current_state(TASK_INTERRUPTIBLE);
+				if (!list_empty(&worker->pending))
+					goto again_locked;
+
+				/*
+				 * this makes sure we get a wakeup when someone
+				 * adds something new to the queue
+				 */
+				worker->working = 0;
+				spin_unlock_irq(&worker->lock);
+
 				schedule();
+			}
 			__set_current_state(TASK_RUNNING);
 		}
 	} while (!kthread_should_stop());
@@ -355,8 +393,8 @@ int btrfs_requeue_work(struct btrfs_work *work)
 		goto out;
 
 	spin_lock_irqsave(&worker->lock, flags);
-	atomic_inc(&worker->num_pending);
 	list_add_tail(&work->list, &worker->pending);
+	atomic_inc(&worker->num_pending);
 
 	/* by definition we're busy, take ourselves off the idle
 	 * list
@@ -405,9 +443,9 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
 	spin_lock_irqsave(&worker->lock, flags);
 
+	list_add_tail(&work->list, &worker->pending);
 	atomic_inc(&worker->num_pending);
 	check_busy_worker(worker);
-	list_add_tail(&work->list, &worker->pending);
 
 	/*
 	 * avoid calling into wake_up_process if this thread has already
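A note on the two reordering hunks above: once worker_loop() re-checks
list_empty(&worker->pending) without holding the lock, a producer should make
the work visible on the list before it bumps anything that advertises the
work (num_pending, busy state). The snippet below is a small C11-atomics
sketch of that publish-before-announce discipline, with hypothetical names;
the kernel code gets the same effect from the spinlock plus smp_mb(), not
from these primitives, and a single producer is assumed here because the
kernel path holds worker->lock while enqueueing.

/* publish-before-announce, sketched with C11 atomics */
#include <stdatomic.h>
#include <stddef.h>

struct work { struct work *next; };

struct queue {
	_Atomic(struct work *) head;	/* stand-in for worker->pending */
	atomic_int num_pending;		/* stand-in for worker->num_pending */
};

static void enqueue(struct queue *q, struct work *w)
{
	/* make the item reachable first ... */
	w->next = atomic_load_explicit(&q->head, memory_order_relaxed);
	atomic_store_explicit(&q->head, w, memory_order_release);
	/* ... and only then announce it; a reader that sees the new
	 * count is guaranteed to also see the item on the list */
	atomic_fetch_add_explicit(&q->num_pending, 1, memory_order_release);
}

static int work_available(struct queue *q)
{
	/* the acquire loads pair with the release stores above */
	if (atomic_load_explicit(&q->num_pending, memory_order_acquire) == 0)
		return 0;
	return atomic_load_explicit(&q->head, memory_order_acquire) != NULL;
}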
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7feac5a475e9..9c3810047976 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1679,6 +1679,8 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	 * low idle thresh
 	 */
 	fs_info->endio_workers.idle_thresh = 4;
+	fs_info->endio_meta_workers.idle_thresh = 4;
+
 	fs_info->endio_write_workers.idle_thresh = 64;
 	fs_info->endio_meta_write_workers.idle_thresh = 64;
 