Diffstat (limited to 'fs/btrfs/async-thread.c')

 -rw-r--r--  fs/btrfs/async-thread.c | 117
 1 file changed, 55 insertions(+), 62 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7ec14097fef1..cb97174e2366 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -64,6 +64,8 @@ struct btrfs_worker_thread {
 	int idle;
 };
 
+static int __btrfs_start_workers(struct btrfs_workers *workers);
+
 /*
  * btrfs_start_workers uses kthread_run, which can block waiting for memory
  * for a very long time. It will actually throttle on page writeback,
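The comment above carries the motivation for the whole patch: kthread_run() can block for a very long time, so thread creation is pushed off to the atomic_worker_start helper queue rather than done from contexts that must not stall. For orientation, the work item that carries such a deferred start request looks roughly like this; its definition sits above this hunk, and the layout is inferred from the container_of() and field accesses visible later in the diff:

struct worker_start {
	struct btrfs_work work;		/* queued on workers->atomic_worker_start */
	struct btrfs_workers *queue;	/* the pool that wants another thread */
};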
@@ -88,27 +90,10 @@ static void start_new_worker_func(struct btrfs_work *work)
 {
 	struct worker_start *start;
 	start = container_of(work, struct worker_start, work);
-	btrfs_start_workers(start->queue, 1);
+	__btrfs_start_workers(start->queue);
 	kfree(start);
 }
 
-static int start_new_worker(struct btrfs_workers *queue)
-{
-	struct worker_start *start;
-	int ret;
-
-	start = kzalloc(sizeof(*start), GFP_NOFS);
-	if (!start)
-		return -ENOMEM;
-
-	start->work.func = start_new_worker_func;
-	start->queue = queue;
-	ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
-	if (ret)
-		kfree(start);
-	return ret;
-}
-
 /*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
@@ -153,12 +138,20 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
 	struct btrfs_workers *workers = worker->workers;
+	struct worker_start *start;
 	unsigned long flags;
 
 	rmb();
 	if (!workers->atomic_start_pending)
 		return;
 
+	start = kzalloc(sizeof(*start), GFP_NOFS);
+	if (!start)
+		return;
+
+	start->work.func = start_new_worker_func;
+	start->queue = workers;
+
 	spin_lock_irqsave(&workers->lock, flags);
 	if (!workers->atomic_start_pending)
 		goto out;
@@ -170,10 +163,11 @@ static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 
 	workers->num_workers_starting += 1;
 	spin_unlock_irqrestore(&workers->lock, flags);
-	start_new_worker(workers);
+	btrfs_queue_worker(workers->atomic_worker_start, &start->work);
 	return;
 
 out:
+	kfree(start);
 	spin_unlock_irqrestore(&workers->lock, flags);
 }
 
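The two hunks above pull the kzalloc() out from under workers->lock: a GFP_NOFS allocation may sleep, which is not allowed while holding an IRQ-disabling spinlock, so the buffer is allocated speculatively up front and freed on the out: path if the pending flag was already handled. A generic sketch of this allocate-then-recheck pattern; struct pool, struct item, and hand_off() are illustrative names, not from async-thread.c:

#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative types only. */
struct item {
	int payload;
};

struct pool {
	spinlock_t lock;
	int pending;
};

/* Stand-in for the real hand-off (btrfs_queue_worker() in the patch). */
static void hand_off(struct pool *p, struct item *it)
{
}

static void maybe_create(struct pool *p)
{
	struct item *it;
	unsigned long flags;

	it = kzalloc(sizeof(*it), GFP_NOFS);	/* may sleep: do it unlocked */
	if (!it)
		return;				/* benign: retried on the next pass */

	spin_lock_irqsave(&p->lock, flags);
	if (!p->pending) {
		/* someone else got here first; drop the speculative buffer */
		spin_unlock_irqrestore(&p->lock, flags);
		kfree(it);
		return;
	}
	p->pending = 0;
	spin_unlock_irqrestore(&p->lock, flags);
	hand_off(p, it);			/* ownership passes on */
}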
@@ -331,7 +325,7 @@ again:
 			run_ordered_completions(worker->workers, work);
 
 			check_pending_worker_creates(worker);
-
+			cond_resched();
 		}
 
 		spin_lock_irq(&worker->lock);
@@ -462,56 +456,55 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
  * starts new worker threads. This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-static int __btrfs_start_workers(struct btrfs_workers *workers,
-				 int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	int ret = 0;
-	int i;
 
-	for (i = 0; i < num_workers; i++) {
-		worker = kzalloc(sizeof(*worker), GFP_NOFS);
-		if (!worker) {
-			ret = -ENOMEM;
-			goto fail;
-		}
+	worker = kzalloc(sizeof(*worker), GFP_NOFS);
+	if (!worker) {
+		ret = -ENOMEM;
+		goto fail;
+	}
 
-		INIT_LIST_HEAD(&worker->pending);
-		INIT_LIST_HEAD(&worker->prio_pending);
-		INIT_LIST_HEAD(&worker->worker_list);
-		spin_lock_init(&worker->lock);
-
-		atomic_set(&worker->num_pending, 0);
-		atomic_set(&worker->refs, 1);
-		worker->workers = workers;
-		worker->task = kthread_run(worker_loop, worker,
-					   "btrfs-%s-%d", workers->name,
-					   workers->num_workers + i);
-		if (IS_ERR(worker->task)) {
-			ret = PTR_ERR(worker->task);
-			kfree(worker);
-			goto fail;
-		}
-		spin_lock_irq(&workers->lock);
-		list_add_tail(&worker->worker_list, &workers->idle_list);
-		worker->idle = 1;
-		workers->num_workers++;
-		workers->num_workers_starting--;
-		WARN_ON(workers->num_workers_starting < 0);
-		spin_unlock_irq(&workers->lock);
-	}
+	INIT_LIST_HEAD(&worker->pending);
+	INIT_LIST_HEAD(&worker->prio_pending);
+	INIT_LIST_HEAD(&worker->worker_list);
+	spin_lock_init(&worker->lock);
+
+	atomic_set(&worker->num_pending, 0);
+	atomic_set(&worker->refs, 1);
+	worker->workers = workers;
+	worker->task = kthread_run(worker_loop, worker,
+				   "btrfs-%s-%d", workers->name,
+				   workers->num_workers + 1);
+	if (IS_ERR(worker->task)) {
+		ret = PTR_ERR(worker->task);
+		kfree(worker);
+		goto fail;
+	}
+	spin_lock_irq(&workers->lock);
+	list_add_tail(&worker->worker_list, &workers->idle_list);
+	worker->idle = 1;
+	workers->num_workers++;
+	workers->num_workers_starting--;
+	WARN_ON(workers->num_workers_starting < 0);
+	spin_unlock_irq(&workers->lock);
+
 	return 0;
 fail:
-	btrfs_stop_workers(workers);
+	spin_lock_irq(&workers->lock);
+	workers->num_workers_starting--;
+	spin_unlock_irq(&workers->lock);
 	return ret;
 }
 
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+int btrfs_start_workers(struct btrfs_workers *workers)
 {
 	spin_lock_irq(&workers->lock);
-	workers->num_workers_starting += num_workers;
+	workers->num_workers_starting++;
 	spin_unlock_irq(&workers->lock);
-	return __btrfs_start_workers(workers, num_workers);
+	return __btrfs_start_workers(workers);
 }
 
 /*
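This hunk unrolls the old for loop into a single-worker start and softens the failure path: instead of tearing down the entire pool with btrfs_stop_workers(), it only rolls back the num_workers_starting count and returns the error. btrfs_start_workers() consequently loses its count argument and can now fail, so its call sites (which live in other files and are not part of this diff) must check the result. A hypothetical call site after this change might read:

/* Hypothetical caller; the real ones are outside async-thread.c. */
static int example_start_pool(struct btrfs_workers *workers)
{
	int ret;

	ret = btrfs_start_workers(workers);	/* starts exactly one thread */
	if (ret)
		return ret;	/* bookkeeping was already rolled back */
	return 0;
}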
@@ -568,6 +561,7 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
 	struct list_head *fallback;
+	int ret;
 
 again:
 	spin_lock_irqsave(&workers->lock, flags);
@@ -584,7 +578,9 @@ again:
 			workers->num_workers_starting++;
 			spin_unlock_irqrestore(&workers->lock, flags);
 			/* we're below the limit, start another worker */
-			__btrfs_start_workers(workers, 1);
+			ret = __btrfs_start_workers(workers);
+			if (ret)
+				goto fallback;
 			goto again;
 		}
 	}
@@ -665,7 +661,7 @@ void btrfs_set_work_high_prio(struct btrfs_work *work)
 /*
  * places a struct btrfs_work into the pending queue of one of the kthreads
  */
-int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
+void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 {
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
@@ -673,7 +669,7 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
 	/* don't requeue something already on a list */
 	if (test_and_set_bit(WORK_QUEUED_BIT, &work->flags))
-		goto out;
+		return;
 
 	worker = find_worker(workers);
 	if (workers->ordered) {
@@ -712,7 +708,4 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 	if (wake)
 		wake_up_process(worker->task);
 	spin_unlock_irqrestore(&worker->lock, flags);
-
-out:
-	return 0;
 }
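The last three hunks finish the interface change: btrfs_queue_worker() becomes void. Its single return statement was an unconditional return 0;, and since find_worker() now falls back to an existing thread when __btrfs_start_workers() fails, queueing a work item cannot fail at all. A hypothetical submission site after this change; some_func is an assumed callback, not part of the diff:

static void some_func(struct btrfs_work *work);	/* assumed callback */

static void example_queue(struct btrfs_workers *workers,
			  struct btrfs_work *work)
{
	work->func = some_func;
	btrfs_queue_worker(workers, work);	/* void: nothing to check */
}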
