author     Ingo Molnar <mingo@elte.hu>    2009-02-09 04:32:48 -0500
committer  Ingo Molnar <mingo@elte.hu>    2009-02-09 04:32:48 -0500
commit     4ad476e11f94fd3724c6e272d8220e99cd222b27 (patch)
tree       a8684d4ecbfe1d2b2b1e29ed74de65394ad21b43 /fs/btrfs/async-thread.c
parent     304cc6ae1bf7a8e6d00053fbe0b7e2b26cdddda2 (diff)
parent     8e4921515c1a379539607eb443d51c30f4f7f338 (diff)
Merge commit 'v2.6.29-rc4' into tracing/core
Diffstat (limited to 'fs/btrfs/async-thread.c')
 -rw-r--r--   fs/btrfs/async-thread.c   61
 1 file changed, 53 insertions, 8 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 8e2fec05dbe0..c84ca1f5259a 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -16,11 +16,11 @@
  * Boston, MA 021110-1307, USA.
  */
 
-#include <linux/version.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-# include <linux/freezer.h>
+#include <linux/freezer.h>
+#include <linux/ftrace.h>
 #include "async-thread.h"
 
 #define WORK_QUEUED_BIT 0
@@ -143,6 +143,7 @@ static int worker_loop(void *arg)
         struct btrfs_work *work;
         do {
                 spin_lock_irq(&worker->lock);
+again_locked:
                 while (!list_empty(&worker->pending)) {
                         cur = worker->pending.next;
                         work = list_entry(cur, struct btrfs_work, list);
@@ -165,14 +166,50 @@ static int worker_loop(void *arg)
                         check_idle_worker(worker);
 
                 }
-                worker->working = 0;
                 if (freezing(current)) {
+                        worker->working = 0;
+                        spin_unlock_irq(&worker->lock);
                         refrigerator();
                 } else {
-                        set_current_state(TASK_INTERRUPTIBLE);
                         spin_unlock_irq(&worker->lock);
-                        if (!kthread_should_stop())
+                        if (!kthread_should_stop()) {
+                                cpu_relax();
+                                /*
+                                 * we've dropped the lock, did someone else
+                                 * jump_in?
+                                 */
+                                smp_mb();
+                                if (!list_empty(&worker->pending))
+                                        continue;
+
+                                /*
+                                 * this short schedule allows more work to
+                                 * come in without the queue functions
+                                 * needing to go through wake_up_process()
+                                 *
+                                 * worker->working is still 1, so nobody
+                                 * is going to try and wake us up
+                                 */
+                                schedule_timeout(1);
+                                smp_mb();
+                                if (!list_empty(&worker->pending))
+                                        continue;
+
+                                /* still no more work?, sleep for real */
+                                spin_lock_irq(&worker->lock);
+                                set_current_state(TASK_INTERRUPTIBLE);
+                                if (!list_empty(&worker->pending))
+                                        goto again_locked;
+
+                                /*
+                                 * this makes sure we get a wakeup when someone
+                                 * adds something new to the queue
+                                 */
+                                worker->working = 0;
+                                spin_unlock_irq(&worker->lock);
+
                                 schedule();
+                        }
                         __set_current_state(TASK_RUNNING);
                 }
         } while (!kthread_should_stop());
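
The rewritten sleep path above is the core of this change: worker->working is now cleared only with worker->lock held, and the pending list is re-checked both after the short schedule_timeout(1) nap and again under the lock (via the new again_locked label) before the thread commits to sleeping. The same protocol is easy to model outside the kernel. Below is a minimal userspace sketch in C with pthreads; a condition variable stands in for wake_up_process()/schedule(), a bare counter stands in for the work list, and every name is illustrative rather than taken from btrfs. The producer half of the model follows the requeue hunks further down.

#include <pthread.h>
#include <stdbool.h>

/* Illustrative stand-in for struct btrfs_worker_thread. */
struct worker {
        pthread_mutex_t lock;      /* plays the role of worker->lock */
        pthread_cond_t  more_work; /* stands in for wake_up_process()/schedule() */
        int  pending;              /* queued items; a counter, not a list */
        bool working;              /* true: worker promises to re-check pending */
        bool stop;                 /* models kthread_should_stop() */
};

static void *worker_loop_model(void *arg)
{
        struct worker *w = arg;

        pthread_mutex_lock(&w->lock);
        while (!w->stop) {
                /* drain the queue, dropping the lock while "running" items */
                while (w->pending > 0) {
                        w->pending--;
                        pthread_mutex_unlock(&w->lock);
                        /* real work would execute here, unlocked */
                        pthread_mutex_lock(&w->lock);
                }
                /*
                 * As in the hunk: announce the coming sleep while still
                 * holding the lock, then re-check before sleeping.  A
                 * producer either sees working == false (and signals us)
                 * or queued while we held the lock (and the drain loop
                 * above already saw the item).  No wakeup can be lost.
                 */
                w->working = false;
                if (w->pending == 0 && !w->stop)
                        pthread_cond_wait(&w->more_work, &w->lock);
                w->working = true;
        }
        pthread_mutex_unlock(&w->lock);
        return NULL;
}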
@@ -350,13 +387,14 @@ int btrfs_requeue_work(struct btrfs_work *work)
 {
         struct btrfs_worker_thread *worker = work->worker;
         unsigned long flags;
+        int wake = 0;
 
         if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
                 goto out;
 
         spin_lock_irqsave(&worker->lock, flags);
-        atomic_inc(&worker->num_pending);
         list_add_tail(&work->list, &worker->pending);
+        atomic_inc(&worker->num_pending);
 
         /* by definition we're busy, take ourselves off the idle
          * list
@@ -368,10 +406,16 @@ int btrfs_requeue_work(struct btrfs_work *work)
                            &worker->workers->worker_list);
                 spin_unlock_irqrestore(&worker->workers->lock, flags);
         }
+        if (!worker->working) {
+                wake = 1;
+                worker->working = 1;
+        }
 
         spin_unlock_irqrestore(&worker->lock, flags);
-
+        if (wake)
+                wake_up_process(worker->task);
 out:
+
         return 0;
 }
 
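
The two btrfs_requeue_work() hunks show the other half of the handshake: the wakeup decision is recorded under worker->lock through the new wake flag, but wake_up_process() itself runs only after the unlock. A likely motivation (not spelled out in the commit) is that the woken thread's first act in worker_loop() is to take that very lock, so waking it while still holding the lock would only make it contend. The producer half of the userspace model, reusing struct worker from the sketch above (names still illustrative):

/* Mirrors the new `wake` flag in btrfs_requeue_work(): skip the wakeup
 * while the worker has promised to re-check, and signal only after
 * unlocking. */
static void queue_work_model(struct worker *w)
{
        bool wake = false;

        pthread_mutex_lock(&w->lock);
        w->pending++;
        if (!w->working) {
                w->working = true;  /* claim the single pending wakeup */
                wake = true;
        }
        pthread_mutex_unlock(&w->lock);

        if (wake)                   /* waker no longer holds the lock the
                                     * woken thread grabs first */
                pthread_cond_signal(&w->more_work);
}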
@@ -398,9 +442,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
         }
 
         spin_lock_irqsave(&worker->lock, flags);
+
+        list_add_tail(&work->list, &worker->pending);
         atomic_inc(&worker->num_pending);
         check_busy_worker(worker);
-        list_add_tail(&work->list, &worker->pending);
 
         /*
          * avoid calling into wake_up_process if this thread has already
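
btrfs_queue_worker() gets the matching treatment: the work item is linked onto worker->pending before num_pending is bumped and check_busy_worker() runs, so the counter never runs ahead of the list it describes. To round off the userspace model, here is a small driver (again purely illustrative) that exercises both halves; it compiles together with the two sketches above:

#include <stdio.h>

int main(void)
{
        struct worker w = {
                .lock      = PTHREAD_MUTEX_INITIALIZER,
                .more_work = PTHREAD_COND_INITIALIZER,
                .working   = true,  /* worker starts awake */
        };
        pthread_t tid;

        pthread_create(&tid, NULL, worker_loop_model, &w);
        for (int i = 0; i < 1000; i++)
                queue_work_model(&w);

        pthread_mutex_lock(&w.lock);
        w.stop = true;                     /* ask the worker to exit... */
        pthread_mutex_unlock(&w.lock);
        pthread_cond_signal(&w.more_work); /* ...and wake it if it sleeps */
        pthread_join(tid, NULL);
        printf("items left unprocessed: %d\n", w.pending);
        return 0;
}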