about summary refs log tree commit diff stats
path: root/fs/btrfs
diff options
context:
space:
mode:
authorChris Mason <chris.mason@oracle.com>2009-08-05 16:36:45 -0400
committerChris Mason <chris.mason@oracle.com>2009-09-11 13:31:04 -0400
commit4e3f9c5042b43301d70781aee4a164a20878066b (patch)
treeb2ace00c27553ec2a10958e17d6b90ea264d5886 /fs/btrfs
parent40431d6c1288793a682fc6f5e5b5c9d5cac34608 (diff)
Btrfs: keep irqs on more often in the worker threads
The btrfs worker thread spinlock was being used both for the queueing of IO and for the processing of ordered events.

The ordered events never happen from end_io handlers, and so they don't need to use the _irq version of spinlocks. This adds a dedicated lock to the ordered lists so they don't have to run with irqs off.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
Diffstat (limited to 'fs/btrfs')
-rw-r--r--fs/btrfs/async-thread.c23
-rw-r--r--fs/btrfs/async-thread.h3
2 files changed, 16 insertions, 10 deletions
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index f10c895224ae..4b4372df3b6d 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -124,14 +124,12 @@ out:
124static noinline int run_ordered_completions(struct btrfs_workers *workers, 124static noinline int run_ordered_completions(struct btrfs_workers *workers,
125 struct btrfs_work *work) 125 struct btrfs_work *work)
126{ 126{
127 unsigned long flags;
128
129 if (!workers->ordered) 127 if (!workers->ordered)
130 return 0; 128 return 0;
131 129
132 set_bit(WORK_DONE_BIT, &work->flags); 130 set_bit(WORK_DONE_BIT, &work->flags);
133 131
134 spin_lock_irqsave(&workers->lock, flags); 132 spin_lock(&workers->order_lock);
135 133
136 while (1) { 134 while (1) {
137 if (!list_empty(&workers->prio_order_list)) { 135 if (!list_empty(&workers->prio_order_list)) {
@@ -154,17 +152,17 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
154 if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags)) 152 if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
155 break; 153 break;
156 154
157 spin_unlock_irqrestore(&workers->lock, flags); 155 spin_unlock(&workers->order_lock);
158 156
159 work->ordered_func(work); 157 work->ordered_func(work);
160 158
161 /* now take the lock again and call the freeing code */ 159 /* now take the lock again and call the freeing code */
162 spin_lock_irqsave(&workers->lock, flags); 160 spin_lock(&workers->order_lock);
163 list_del(&work->order_list); 161 list_del(&work->order_list);
164 work->ordered_free(work); 162 work->ordered_free(work);
165 } 163 }
166 164
167 spin_unlock_irqrestore(&workers->lock, flags); 165 spin_unlock(&workers->order_lock);
168 return 0; 166 return 0;
169} 167}
170 168
@@ -345,6 +343,7 @@ void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
345 INIT_LIST_HEAD(&workers->order_list); 343 INIT_LIST_HEAD(&workers->order_list);
346 INIT_LIST_HEAD(&workers->prio_order_list); 344 INIT_LIST_HEAD(&workers->prio_order_list);
347 spin_lock_init(&workers->lock); 345 spin_lock_init(&workers->lock);
346 spin_lock_init(&workers->order_lock);
348 workers->max_workers = max; 347 workers->max_workers = max;
349 workers->idle_thresh = 32; 348 workers->idle_thresh = 32;
350 workers->name = name; 349 workers->name = name;
@@ -374,6 +373,7 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
374 INIT_LIST_HEAD(&worker->prio_pending); 373 INIT_LIST_HEAD(&worker->prio_pending);
375 INIT_LIST_HEAD(&worker->worker_list); 374 INIT_LIST_HEAD(&worker->worker_list);
376 spin_lock_init(&worker->lock); 375 spin_lock_init(&worker->lock);
376
377 atomic_set(&worker->num_pending, 0); 377 atomic_set(&worker->num_pending, 0);
378 atomic_set(&worker->refs, 1); 378 atomic_set(&worker->refs, 1);
379 worker->workers = workers; 379 worker->workers = workers;
@@ -453,10 +453,8 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
453again: 453again:
454 spin_lock_irqsave(&workers->lock, flags); 454 spin_lock_irqsave(&workers->lock, flags);
455 worker = next_worker(workers); 455 worker = next_worker(workers);
456 spin_unlock_irqrestore(&workers->lock, flags);
457 456
458 if (!worker) { 457 if (!worker) {
459 spin_lock_irqsave(&workers->lock, flags);
460 if (workers->num_workers >= workers->max_workers) { 458 if (workers->num_workers >= workers->max_workers) {
461 goto fallback; 459 goto fallback;
462 } else if (workers->atomic_worker_start) { 460 } else if (workers->atomic_worker_start) {
@@ -469,6 +467,7 @@ again:
469 goto again; 467 goto again;
470 } 468 }
471 } 469 }
470 spin_unlock_irqrestore(&workers->lock, flags);
472 return worker; 471 return worker;
473 472
474fallback: 473fallback:
@@ -552,14 +551,18 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
552 551
553 worker = find_worker(workers); 552 worker = find_worker(workers);
554 if (workers->ordered) { 553 if (workers->ordered) {
555 spin_lock_irqsave(&workers->lock, flags); 554 /*
555 * you're not allowed to do ordered queues from an
556 * interrupt handler
557 */
558 spin_lock(&workers->order_lock);
556 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) { 559 if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
557 list_add_tail(&work->order_list, 560 list_add_tail(&work->order_list,
558 &workers->prio_order_list); 561 &workers->prio_order_list);
559 } else { 562 } else {
560 list_add_tail(&work->order_list, &workers->order_list); 563 list_add_tail(&work->order_list, &workers->order_list);
561 } 564 }
562 spin_unlock_irqrestore(&workers->lock, flags); 565 spin_unlock(&workers->order_lock);
563 } else { 566 } else {
564 INIT_LIST_HEAD(&work->order_list); 567 INIT_LIST_HEAD(&work->order_list);
565 } 568 }
diff --git a/fs/btrfs/async-thread.h b/fs/btrfs/async-thread.h
index a562ad8d83aa..fc089b95ec14 100644
--- a/fs/btrfs/async-thread.h
+++ b/fs/btrfs/async-thread.h
@@ -99,6 +99,9 @@ struct btrfs_workers {
99 /* lock for finding the next worker thread to queue on */ 99 /* lock for finding the next worker thread to queue on */
100 spinlock_t lock; 100 spinlock_t lock;
101 101
102 /* lock for the ordered lists */
103 spinlock_t order_lock;
104
102 /* extra name for this worker, used for current->name */ 105 /* extra name for this worker, used for current->name */
103 char *name; 106 char *name;
104}; 107};