diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-21 17:12:58 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-04-21 17:12:58 -0400 |
| commit | ccc5ff94c66e628d3c501b26ace5d4339667715d (patch) | |
| tree | 41ca2f1552864cc86bd5735c1b05d0de2898bb05 /fs/btrfs/async-thread.c | |
| parent | c19c6c32dcccfc89216bd579c0cb12d2dd45098f (diff) | |
| parent | 546888da82082555a56528730a83f0afd12f33bf (diff) | |
Merge git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable
* git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
Btrfs: fix btrfs fallocate oops and deadlock
Btrfs: use the right node in reada_for_balance
Btrfs: fix oops on page->mapping->host during writepage
Btrfs: add a priority queue to the async thread helpers
Btrfs: use WRITE_SYNC for synchronous writes
Diffstat (limited to 'fs/btrfs/async-thread.c')
| -rw-r--r-- | fs/btrfs/async-thread.c | 60 |
1 file changed, 47 insertions, 13 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c index 51bfdfc8fcd..502c3d61de6 100644 --- a/fs/btrfs/async-thread.c +++ b/fs/btrfs/async-thread.c | |||
| @@ -25,6 +25,7 @@ | |||
| 25 | #define WORK_QUEUED_BIT 0 | 25 | #define WORK_QUEUED_BIT 0 |
| 26 | #define WORK_DONE_BIT 1 | 26 | #define WORK_DONE_BIT 1 |
| 27 | #define WORK_ORDER_DONE_BIT 2 | 27 | #define WORK_ORDER_DONE_BIT 2 |
| 28 | #define WORK_HIGH_PRIO_BIT 3 | ||
| 28 | 29 | ||
| 29 | /* | 30 | /* |
| 30 | * container for the kthread task pointer and the list of pending work | 31 | * container for the kthread task pointer and the list of pending work |
| @@ -36,6 +37,7 @@ struct btrfs_worker_thread { | |||
| 36 | 37 | ||
| 37 | /* list of struct btrfs_work that are waiting for service */ | 38 | /* list of struct btrfs_work that are waiting for service */ |
| 38 | struct list_head pending; | 39 | struct list_head pending; |
| 40 | struct list_head prio_pending; | ||
| 39 | 41 | ||
| 40 | /* list of worker threads from struct btrfs_workers */ | 42 | /* list of worker threads from struct btrfs_workers */ |
| 41 | struct list_head worker_list; | 43 | struct list_head worker_list; |
| @@ -103,10 +105,16 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers, | |||
| 103 | 105 | ||
| 104 | spin_lock_irqsave(&workers->lock, flags); | 106 | spin_lock_irqsave(&workers->lock, flags); |
| 105 | 107 | ||
| 106 | while (!list_empty(&workers->order_list)) { | 108 | while (1) { |
| 107 | work = list_entry(workers->order_list.next, | 109 | if (!list_empty(&workers->prio_order_list)) { |
| 108 | struct btrfs_work, order_list); | 110 | work = list_entry(workers->prio_order_list.next, |
| 109 | 111 | struct btrfs_work, order_list); | |
| 112 | } else if (!list_empty(&workers->order_list)) { | ||
| 113 | work = list_entry(workers->order_list.next, | ||
| 114 | struct btrfs_work, order_list); | ||
| 115 | } else { | ||
| 116 | break; | ||
| 117 | } | ||
| 110 | if (!test_bit(WORK_DONE_BIT, &work->flags)) | 118 | if (!test_bit(WORK_DONE_BIT, &work->flags)) |
| 111 | break; | 119 | break; |
| 112 | 120 | ||
| @@ -143,8 +151,14 @@ static int worker_loop(void *arg) | |||
| 143 | do { | 151 | do { |
| 144 | spin_lock_irq(&worker->lock); | 152 | spin_lock_irq(&worker->lock); |
| 145 | again_locked: | 153 | again_locked: |
| 146 | while (!list_empty(&worker->pending)) { | 154 | while (1) { |
| 147 | cur = worker->pending.next; | 155 | if (!list_empty(&worker->prio_pending)) |
| 156 | cur = worker->prio_pending.next; | ||
| 157 | else if (!list_empty(&worker->pending)) | ||
| 158 | cur = worker->pending.next; | ||
| 159 | else | ||
| 160 | break; | ||
| 161 | |||
| 148 | work = list_entry(cur, struct btrfs_work, list); | 162 | work = list_entry(cur, struct btrfs_work, list); |
| 149 | list_del(&work->list); | 163 | list_del(&work->list); |
| 150 | clear_bit(WORK_QUEUED_BIT, &work->flags); | 164 | clear_bit(WORK_QUEUED_BIT, &work->flags); |
| @@ -163,7 +177,6 @@ again_locked: | |||
| 163 | 177 | ||
| 164 | spin_lock_irq(&worker->lock); | 178 | spin_lock_irq(&worker->lock); |
| 165 | check_idle_worker(worker); | 179 | check_idle_worker(worker); |
| 166 | |||
| 167 | } | 180 | } |
| 168 | if (freezing(current)) { | 181 | if (freezing(current)) { |
| 169 | worker->working = 0; | 182 | worker->working = 0; |
| @@ -178,7 +191,8 @@ again_locked: | |||
| 178 | * jump_in? | 191 | * jump_in? |
| 179 | */ | 192 | */ |
| 180 | smp_mb(); | 193 | smp_mb(); |
| 181 | if (!list_empty(&worker->pending)) | 194 | if (!list_empty(&worker->pending) || |
| 195 | !list_empty(&worker->prio_pending)) | ||
| 182 | continue; | 196 | continue; |
| 183 | 197 | ||
| 184 | /* | 198 | /* |
| @@ -191,7 +205,8 @@ again_locked: | |||
| 191 | */ | 205 | */ |
| 192 | schedule_timeout(1); | 206 | schedule_timeout(1); |
| 193 | smp_mb(); | 207 | smp_mb(); |
| 194 | if (!list_empty(&worker->pending)) | 208 | if (!list_empty(&worker->pending) || |
| 209 | !list_empty(&worker->prio_pending)) | ||
| 195 | continue; | 210 | continue; |
| 196 | 211 | ||
| 197 | if (kthread_should_stop()) | 212 | if (kthread_should_stop()) |
| @@ -200,7 +215,8 @@ again_locked: | |||
| 200 | /* still no more work?, sleep for real */ | 215 | /* still no more work?, sleep for real */ |
| 201 | spin_lock_irq(&worker->lock); | 216 | spin_lock_irq(&worker->lock); |
| 202 | set_current_state(TASK_INTERRUPTIBLE); | 217 | set_current_state(TASK_INTERRUPTIBLE); |
| 203 | if (!list_empty(&worker->pending)) | 218 | if (!list_empty(&worker->pending) || |
| 219 | !list_empty(&worker->prio_pending)) | ||
| 204 | goto again_locked; | 220 | goto again_locked; |
| 205 | 221 | ||
| 206 | /* | 222 | /* |
| @@ -248,6 +264,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max) | |||
| 248 | INIT_LIST_HEAD(&workers->worker_list); | 264 | INIT_LIST_HEAD(&workers->worker_list); |
| 249 | INIT_LIST_HEAD(&workers->idle_list); | 265 | INIT_LIST_HEAD(&workers->idle_list); |
| 250 | INIT_LIST_HEAD(&workers->order_list); | 266 | INIT_LIST_HEAD(&workers->order_list); |
| 267 | INIT_LIST_HEAD(&workers->prio_order_list); | ||
| 251 | spin_lock_init(&workers->lock); | 268 | spin_lock_init(&workers->lock); |
| 252 | workers->max_workers = max; | 269 | workers->max_workers = max; |
| 253 | workers->idle_thresh = 32; | 270 | workers->idle_thresh = 32; |
| @@ -273,6 +290,7 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers) | |||
| 273 | } | 290 | } |
| 274 | 291 | ||
| 275 | INIT_LIST_HEAD(&worker->pending); | 292 | INIT_LIST_HEAD(&worker->pending); |
| 293 | INIT_LIST_HEAD(&worker->prio_pending); | ||
| 276 | INIT_LIST_HEAD(&worker->worker_list); | 294 | INIT_LIST_HEAD(&worker->worker_list); |
| 277 | spin_lock_init(&worker->lock); | 295 | spin_lock_init(&worker->lock); |
| 278 | atomic_set(&worker->num_pending, 0); | 296 | atomic_set(&worker->num_pending, 0); |
| @@ -396,7 +414,10 @@ int btrfs_requeue_work(struct btrfs_work *work) | |||
| 396 | goto out; | 414 | goto out; |
| 397 | 415 | ||
| 398 | spin_lock_irqsave(&worker->lock, flags); | 416 | spin_lock_irqsave(&worker->lock, flags); |
| 399 | list_add_tail(&work->list, &worker->pending); | 417 | if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) |
| 418 | list_add_tail(&work->list, &worker->prio_pending); | ||
| 419 | else | ||
| 420 | list_add_tail(&work->list, &worker->pending); | ||
| 400 | atomic_inc(&worker->num_pending); | 421 | atomic_inc(&worker->num_pending); |
| 401 | 422 | ||
| 402 | /* by definition we're busy, take ourselves off the idle | 423 | /* by definition we're busy, take ourselves off the idle |
| @@ -422,6 +443,11 @@ out: | |||
| 422 | return 0; | 443 | return 0; |
| 423 | } | 444 | } |
| 424 | 445 | ||
| 446 | void btrfs_set_work_high_prio(struct btrfs_work *work) | ||
| 447 | { | ||
| 448 | set_bit(WORK_HIGH_PRIO_BIT, &work->flags); | ||
| 449 | } | ||
| 450 | |||
| 425 | /* | 451 | /* |
| 426 | * places a struct btrfs_work into the pending queue of one of the kthreads | 452 | * places a struct btrfs_work into the pending queue of one of the kthreads |
| 427 | */ | 453 | */ |
| @@ -438,7 +464,12 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work) | |||
| 438 | worker = find_worker(workers); | 464 | worker = find_worker(workers); |
| 439 | if (workers->ordered) { | 465 | if (workers->ordered) { |
| 440 | spin_lock_irqsave(&workers->lock, flags); | 466 | spin_lock_irqsave(&workers->lock, flags); |
| 441 | list_add_tail(&work->order_list, &workers->order_list); | 467 | if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) { |
| 468 | list_add_tail(&work->order_list, | ||
| 469 | &workers->prio_order_list); | ||
| 470 | } else { | ||
| 471 | list_add_tail(&work->order_list, &workers->order_list); | ||
| 472 | } | ||
| 442 | spin_unlock_irqrestore(&workers->lock, flags); | 473 | spin_unlock_irqrestore(&workers->lock, flags); |
| 443 | } else { | 474 | } else { |
| 444 | INIT_LIST_HEAD(&work->order_list); | 475 | INIT_LIST_HEAD(&work->order_list); |
| @@ -446,7 +477,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work) | |||
| 446 | 477 | ||
| 447 | spin_lock_irqsave(&worker->lock, flags); | 478 | spin_lock_irqsave(&worker->lock, flags); |
| 448 | 479 | ||
| 449 | list_add_tail(&work->list, &worker->pending); | 480 | if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) |
| 481 | list_add_tail(&work->list, &worker->prio_pending); | ||
| 482 | else | ||
| 483 | list_add_tail(&work->list, &worker->pending); | ||
| 450 | atomic_inc(&worker->num_pending); | 484 | atomic_inc(&worker->num_pending); |
| 451 | check_busy_worker(worker); | 485 | check_busy_worker(worker); |
| 452 | 486 | ||
