Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--	fs/btrfs/async-thread.c	66
1 files changed, 57 insertions, 9 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 8e2fec05dbe0..51bfdfc8fcda 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -16,11 +16,10 @@
  * Boston, MA 021110-1307, USA.
  */
 
-#include <linux/version.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-# include <linux/freezer.h>
+#include <linux/freezer.h>
 #include "async-thread.h"
 
 #define WORK_QUEUED_BIT 0
@@ -143,6 +142,7 @@ static int worker_loop(void *arg)
 	struct btrfs_work *work;
 	do {
 		spin_lock_irq(&worker->lock);
+again_locked:
 		while (!list_empty(&worker->pending)) {
 			cur = worker->pending.next;
 			work = list_entry(cur, struct btrfs_work, list);
@@ -165,14 +165,54 @@ static int worker_loop(void *arg)
 			check_idle_worker(worker);
 
 		}
-		worker->working = 0;
 		if (freezing(current)) {
+			worker->working = 0;
+			spin_unlock_irq(&worker->lock);
 			refrigerator();
 		} else {
-			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&worker->lock);
-			if (!kthread_should_stop())
-				schedule();
+			if (!kthread_should_stop()) {
+				cpu_relax();
+				/*
+				 * we've dropped the lock, did someone else
+				 * jump_in?
+				 */
+				smp_mb();
+				if (!list_empty(&worker->pending))
+					continue;
+
+				/*
+				 * this short schedule allows more work to
+				 * come in without the queue functions
+				 * needing to go through wake_up_process()
+				 *
+				 * worker->working is still 1, so nobody
+				 * is going to try and wake us up
+				 */
+				schedule_timeout(1);
+				smp_mb();
+				if (!list_empty(&worker->pending))
+					continue;
+
+				if (kthread_should_stop())
+					break;
+
+				/* still no more work?, sleep for real */
+				spin_lock_irq(&worker->lock);
+				set_current_state(TASK_INTERRUPTIBLE);
+				if (!list_empty(&worker->pending))
+					goto again_locked;
+
+				/*
+				 * this makes sure we get a wakeup when someone
+				 * adds something new to the queue
+				 */
+				worker->working = 0;
+				spin_unlock_irq(&worker->lock);
+
+				if (!kthread_should_stop())
+					schedule();
+			}
 			__set_current_state(TASK_RUNNING);
 		}
 	} while (!kthread_should_stop());
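The worker_loop() hunk above replaces the old "sleep as soon as the queue drains" behavior with a two-stage idle: the thread drops its lock, re-checks the pending list behind an smp_mb(), naps for a jiffy via schedule_timeout(1) while worker->working stays 1 (so queuers can skip wake_up_process()), and only then re-takes the lock, clears worker->working, and sleeps for real in TASK_INTERRUPTIBLE. A rough userspace analogue of the pattern, with pthreads standing in for the kthread and scheduler primitives (all names here are illustrative, not btrfs code):

/*
 * Userspace sketch of the two-stage idle, for illustration only.
 * A mutex + condition variable stand in for the spinlock and
 * wake_up_process(); a counter stands in for the pending list.
 */
#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

struct worker {
	pthread_mutex_t lock;
	pthread_cond_t wake;
	int pending;	/* count of queued work items */
	int working;	/* 1 while the worker polls on its own */
	bool stop;
};

static void *worker_loop(void *arg)
{
	struct worker *w = arg;

	pthread_mutex_lock(&w->lock);
	for (;;) {
		while (w->pending > 0) {
			w->pending--;
			pthread_mutex_unlock(&w->lock);
			/* run the work item without holding the lock */
			pthread_mutex_lock(&w->lock);
		}
		if (w->stop)
			break;

		/*
		 * stage one: drop the lock and nap briefly.  working
		 * is still 1, so producers skip the wakeup, like the
		 * schedule_timeout(1) window in the patch.
		 */
		pthread_mutex_unlock(&w->lock);
		usleep(1000);
		pthread_mutex_lock(&w->lock);
		if (w->pending > 0 || w->stop)
			continue;

		/*
		 * stage two: still idle, so announce it and sleep for
		 * real.  Clearing working under the lock guarantees
		 * the next producer sees it and signals us.
		 */
		w->working = 0;
		while (!w->working && !w->stop)
			pthread_cond_wait(&w->wake, &w->lock);
	}
	pthread_mutex_unlock(&w->lock);
	return NULL;
}

Here pthread_cond_wait() re-checks its predicate under the mutex, which plays the role of the patch's set_current_state() / list_empty() / goto again_locked dance against lost wakeups.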
@@ -350,13 +390,14 @@ int btrfs_requeue_work(struct btrfs_work *work)
 {
 	struct btrfs_worker_thread *worker = work->worker;
 	unsigned long flags;
+	int wake = 0;
 
 	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
 		goto out;
 
 	spin_lock_irqsave(&worker->lock, flags);
-	atomic_inc(&worker->num_pending);
 	list_add_tail(&work->list, &worker->pending);
+	atomic_inc(&worker->num_pending);
 
 	/* by definition we're busy, take ourselves off the idle
 	 * list
@@ -368,10 +409,16 @@ int btrfs_requeue_work(struct btrfs_work *work)
 			   &worker->workers->worker_list);
 		spin_unlock_irqrestore(&worker->workers->lock, flags);
 	}
+	if (!worker->working) {
+		wake = 1;
+		worker->working = 1;
+	}
 
 	spin_unlock_irqrestore(&worker->lock, flags);
-
+	if (wake)
+		wake_up_process(worker->task);
 out:
+
 	return 0;
 }
 
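In btrfs_requeue_work() the patch records, still under worker->lock, whether the thread had gone idle (worker->working == 0), flips the flag back to 1, and defers the actual wake_up_process() call until after the lock is dropped. The matching producer for the sketch above, building on the same hypothetical struct worker, could look like this (again an illustration, not the btrfs code):

static void queue_work(struct worker *w)
{
	int wake = 0;

	pthread_mutex_lock(&w->lock);
	w->pending++;
	if (!w->working) {
		/* worker is in its deep sleep; we must wake it */
		w->working = 1;
		wake = 1;
	}
	pthread_mutex_unlock(&w->lock);

	/* signal outside the lock, as the patch does */
	if (wake)
		pthread_cond_signal(&w->wake);
}

Testing the flag under the lock means at most one producer pays for a wakeup per idle period, and signaling after the unlock avoids waking a thread only to have it block immediately on the still-held lock.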
@@ -398,9 +445,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 	}
 
 	spin_lock_irqsave(&worker->lock, flags);
+
+	list_add_tail(&work->list, &worker->pending);
 	atomic_inc(&worker->num_pending);
 	check_busy_worker(worker);
-	list_add_tail(&work->list, &worker->pending);
 
 	/*
 	 * avoid calling into wake_up_process if this thread has already
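The final hunk moves list_add_tail() ahead of atomic_inc() and check_busy_worker() (btrfs_requeue_work() gets the same reordering above), so the work is on ->pending before the counter rises or busy accounting runs. That appears to matter for the lockless re-checks in worker_loop(): the producer publishes the work and then looks at the worker's state, while the worker announces idleness and then re-reads the list, the classic store-buffering pairing that the smp_mb() calls enforce, guaranteeing at least one side sees the other's update. A minimal C11-atomics rendering of that pairing (illustrative only; the kernel code gets the same effect from the spinlock plus smp_mb()):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool queue_nonempty;	/* ~ !list_empty(&worker->pending) */
static atomic_bool worker_working;	/* ~ worker->working */

/* producer: publish work first, then decide whether a wakeup is needed */
static bool producer_needs_wake(void)
{
	atomic_store_explicit(&queue_nonempty, true, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
	return !atomic_load_explicit(&worker_working, memory_order_relaxed);
}

/* worker: announce idleness first, then re-check for late arrivals */
static bool worker_may_sleep(void)
{
	atomic_store_explicit(&worker_working, false, memory_order_relaxed);
	atomic_thread_fence(memory_order_seq_cst);	/* ~ smp_mb() */
	return !atomic_load_explicit(&queue_nonempty, memory_order_relaxed);
}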