Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--  fs/btrfs/async-thread.c | 116
1 file changed, 99 insertions(+), 17 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 8e2fec05dbe0..502c3d61de62 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -16,16 +16,16 @@
  * Boston, MA 021110-1307, USA.
  */
 
-#include <linux/version.h>
 #include <linux/kthread.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-# include <linux/freezer.h>
+#include <linux/freezer.h>
 #include "async-thread.h"
 
 #define WORK_QUEUED_BIT 0
 #define WORK_DONE_BIT 1
 #define WORK_ORDER_DONE_BIT 2
+#define WORK_HIGH_PRIO_BIT 3
 
 /*
  * container for the kthread task pointer and the list of pending work
@@ -37,6 +37,7 @@ struct btrfs_worker_thread {
 
 	/* list of struct btrfs_work that are waiting for service */
 	struct list_head pending;
+	struct list_head prio_pending;
 
 	/* list of worker threads from struct btrfs_workers */
 	struct list_head worker_list;
@@ -104,10 +105,16 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
 
 	spin_lock_irqsave(&workers->lock, flags);
 
-	while (!list_empty(&workers->order_list)) {
-		work = list_entry(workers->order_list.next,
-				  struct btrfs_work, order_list);
-
+	while (1) {
+		if (!list_empty(&workers->prio_order_list)) {
+			work = list_entry(workers->prio_order_list.next,
+					  struct btrfs_work, order_list);
+		} else if (!list_empty(&workers->order_list)) {
+			work = list_entry(workers->order_list.next,
+					  struct btrfs_work, order_list);
+		} else {
+			break;
+		}
 		if (!test_bit(WORK_DONE_BIT, &work->flags))
 			break;
 
@@ -143,8 +150,15 @@ static int worker_loop(void *arg)
 	struct btrfs_work *work;
 	do {
 		spin_lock_irq(&worker->lock);
-		while (!list_empty(&worker->pending)) {
-			cur = worker->pending.next;
+again_locked:
+		while (1) {
+			if (!list_empty(&worker->prio_pending))
+				cur = worker->prio_pending.next;
+			else if (!list_empty(&worker->pending))
+				cur = worker->pending.next;
+			else
+				break;
+
 			work = list_entry(cur, struct btrfs_work, list);
 			list_del(&work->list);
 			clear_bit(WORK_QUEUED_BIT, &work->flags);
@@ -163,16 +177,58 @@ static int worker_loop(void *arg)
 
 			spin_lock_irq(&worker->lock);
 			check_idle_worker(worker);
-
 		}
-		worker->working = 0;
 		if (freezing(current)) {
+			worker->working = 0;
+			spin_unlock_irq(&worker->lock);
 			refrigerator();
 		} else {
-			set_current_state(TASK_INTERRUPTIBLE);
 			spin_unlock_irq(&worker->lock);
-			if (!kthread_should_stop())
-				schedule();
+			if (!kthread_should_stop()) {
+				cpu_relax();
+				/*
+				 * we've dropped the lock, did someone else
+				 * jump_in?
+				 */
+				smp_mb();
+				if (!list_empty(&worker->pending) ||
+				    !list_empty(&worker->prio_pending))
+					continue;
+
+				/*
+				 * this short schedule allows more work to
+				 * come in without the queue functions
+				 * needing to go through wake_up_process()
+				 *
+				 * worker->working is still 1, so nobody
+				 * is going to try and wake us up
+				 */
+				schedule_timeout(1);
+				smp_mb();
+				if (!list_empty(&worker->pending) ||
+				    !list_empty(&worker->prio_pending))
+					continue;
+
+				if (kthread_should_stop())
+					break;
+
+				/* still no more work?, sleep for real */
+				spin_lock_irq(&worker->lock);
+				set_current_state(TASK_INTERRUPTIBLE);
+				if (!list_empty(&worker->pending) ||
+				    !list_empty(&worker->prio_pending))
+					goto again_locked;
+
+				/*
+				 * this makes sure we get a wakeup when someone
+				 * adds something new to the queue
+				 */
+				worker->working = 0;
+				spin_unlock_irq(&worker->lock);
+
+				if (!kthread_should_stop())
+					schedule();
+			}
 			__set_current_state(TASK_RUNNING);
 		}
 	} while (!kthread_should_stop());
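The idle path above avoids lost wakeups by rechecking both pending lists after every step that drops the lock, and by clearing worker->working only while the lock is held, so queuers know exactly when wake_up_process() is needed. A compact pthreads sketch of the same handshake, an analogy under userspace assumptions with a condition variable standing in for wake_up_process():

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t more_work = PTHREAD_COND_INITIALIZER;
static int pending;	/* models !list_empty(&worker->pending) */
static int working = 1;	/* models worker->working */

#define TOTAL 4

static void *worker(void *arg)
{
	int done = 0;

	pthread_mutex_lock(&lock);
	while (done < TOTAL) {
		if (pending) {		/* drain the queue first */
			pending--;
			done++;
			continue;
		}
		working = 0;		/* advertise idleness under the lock */
		while (!pending)	/* recheck before sleeping: no lost wakeup */
			pthread_cond_wait(&more_work, &lock);
		working = 1;
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	int i, wake;

	pthread_create(&t, NULL, worker, NULL);
	for (i = 0; i < TOTAL; i++) {
		pthread_mutex_lock(&lock);
		pending++;
		wake = !working;	/* mirrors the patch's "wake" flag */
		if (wake)
			working = 1;	/* claim the wakeup so peers skip it */
		pthread_mutex_unlock(&lock);
		if (wake)
			pthread_cond_signal(&more_work);
	}
	pthread_join(t, NULL);
	printf("consumed %d work items\n", TOTAL);
	return 0;
}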
@@ -208,6 +264,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
 	INIT_LIST_HEAD(&workers->worker_list);
 	INIT_LIST_HEAD(&workers->idle_list);
 	INIT_LIST_HEAD(&workers->order_list);
+	INIT_LIST_HEAD(&workers->prio_order_list);
 	spin_lock_init(&workers->lock);
 	workers->max_workers = max;
 	workers->idle_thresh = 32;
@@ -233,6 +290,7 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 	}
 
 	INIT_LIST_HEAD(&worker->pending);
+	INIT_LIST_HEAD(&worker->prio_pending);
 	INIT_LIST_HEAD(&worker->worker_list);
 	spin_lock_init(&worker->lock);
 	atomic_set(&worker->num_pending, 0);
@@ -350,13 +408,17 @@ int btrfs_requeue_work(struct btrfs_work *work)
 {
 	struct btrfs_worker_thread *worker = work->worker;
 	unsigned long flags;
+	int wake = 0;
 
 	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
 		goto out;
 
 	spin_lock_irqsave(&worker->lock, flags);
+	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
+		list_add_tail(&work->list, &worker->prio_pending);
+	else
+		list_add_tail(&work->list, &worker->pending);
 	atomic_inc(&worker->num_pending);
-	list_add_tail(&work->list, &worker->pending);
 
 	/* by definition we're busy, take ourselves off the idle
 	 * list
@@ -368,13 +430,24 @@ int btrfs_requeue_work(struct btrfs_work *work)
 			       &worker->workers->worker_list);
 		spin_unlock_irqrestore(&worker->workers->lock, flags);
 	}
+	if (!worker->working) {
+		wake = 1;
+		worker->working = 1;
+	}
 
 	spin_unlock_irqrestore(&worker->lock, flags);
-
+	if (wake)
+		wake_up_process(worker->task);
 out:
+
 	return 0;
 }
 
+void btrfs_set_work_high_prio(struct btrfs_work *work)
+{
+	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
+}
+
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
@@ -391,16 +464,25 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 	worker = find_worker(workers);
 	if (workers->ordered) {
 		spin_lock_irqsave(&workers->lock, flags);
-		list_add_tail(&work->order_list, &workers->order_list);
+		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
+			list_add_tail(&work->order_list,
+				      &workers->prio_order_list);
+		} else {
+			list_add_tail(&work->order_list, &workers->order_list);
+		}
 		spin_unlock_irqrestore(&workers->lock, flags);
 	} else {
 		INIT_LIST_HEAD(&work->order_list);
 	}
 
 	spin_lock_irqsave(&worker->lock, flags);
+
+	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags))
+		list_add_tail(&work->list, &worker->prio_pending);
+	else
+		list_add_tail(&work->list, &worker->pending);
 	atomic_inc(&worker->num_pending);
 	check_busy_worker(worker);
-	list_add_tail(&work->list, &worker->pending);
 
 	/*
 	 * avoid calling into wake_up_process if this thread has already
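Taken together, a submitter flags a work item before handing it to the pool so btrfs_queue_worker() routes it onto prio_pending (and prio_order_list for ordered pools). A hypothetical caller sketch, not part of this patch; the pool name is the caller's own and only the two functions shown are defined here:

static void queue_urgent(struct btrfs_workers *workers,
			 struct btrfs_work *work)
{
	btrfs_set_work_high_prio(work);	/* sets WORK_HIGH_PRIO_BIT */
	btrfs_queue_worker(workers, work);
}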