author     Chris Mason <chris.mason@oracle.com>   2009-08-07 09:27:38 -0400
committer  Chris Mason <chris.mason@oracle.com>   2009-09-11 13:31:04 -0400
commit     4f878e8475a465ddbd951e06a23317303f1b5b30 (patch)
tree       7e1811794d3f6eaf455a3e26afd49ba06a0db554 /fs/btrfs/async-thread.c
parent     4e3f9c5042b43301d70781aee4a164a20878066b (diff)
Btrfs: reduce worker thread spin_lock_irq hold times
This changes the btrfs worker threads to batch work items into a local list. It allows us to pull work items in large chunks and significantly reduces the number of times we need to take the worker thread spinlock.

Signed-off-by: Chris Mason <chris.mason@oracle.com>
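The core of the change is a splice-then-process pattern: take the worker lock once, move everything queued onto a list that only this thread touches, drop the lock, and run the batched items with the lock released. Below is a minimal userspace sketch of that pattern, using a pthread mutex in place of the worker spinlock; the work_item struct and the queue_work()/run_batch() helpers are illustrative names, not part of the kernel code.

/*
 * Minimal sketch of the batching idea (not the kernel code): instead of
 * taking the lock once per work item, take it once per batch, splice the
 * whole pending list onto a local list, and run the items with the lock
 * dropped.  Names here (work_item, pending, run_batch) are illustrative.
 */
#include <pthread.h>
#include <stdio.h>

struct work_item {
	struct work_item *next;
	void (*func)(struct work_item *);
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct work_item *pending;	/* shared queue, protected by lock */

static void queue_work(struct work_item *w)
{
	pthread_mutex_lock(&lock);
	w->next = pending;
	pending = w;
	pthread_mutex_unlock(&lock);
}

static void run_batch(void)
{
	struct work_item *local, *w;

	/* one lock round trip pulls every item queued so far */
	pthread_mutex_lock(&lock);
	local = pending;
	pending = NULL;
	pthread_mutex_unlock(&lock);

	/* the work itself runs without the lock held */
	while ((w = local)) {
		local = w->next;
		w->func(w);
	}
}

static void say_hello(struct work_item *w)
{
	(void)w;
	printf("work item ran\n");
}

int main(void)
{
	struct work_item items[4];

	for (int i = 0; i < 4; i++) {
		items[i].func = say_hello;
		queue_work(&items[i]);
	}
	run_batch();
	return 0;
}

The sketch splices a simple singly linked list and therefore reverses item order; the kernel patch instead uses list_splice_tail_init() to keep FIFO order and keeps a separate prio_pending list for high-priority work.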
Diffstat (limited to 'fs/btrfs/async-thread.c')
-rw-r--r--  fs/btrfs/async-thread.c  74
1 file changed, 60 insertions(+), 14 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 4b4372df3b6d..6ea5cd0a595f 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -196,31 +196,73 @@ static int try_worker_shutdown(struct btrfs_worker_thread *worker)
 	return freeit;
 }
 
+static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
+					struct list_head *prio_head,
+					struct list_head *head)
+{
+	struct btrfs_work *work = NULL;
+	struct list_head *cur = NULL;
+
+	if(!list_empty(prio_head))
+		cur = prio_head->next;
+
+	smp_mb();
+	if (!list_empty(&worker->prio_pending))
+		goto refill;
+
+	if (!list_empty(head))
+		cur = head->next;
+
+	if (cur)
+		goto out;
+
+refill:
+	spin_lock_irq(&worker->lock);
+	list_splice_tail_init(&worker->prio_pending, prio_head);
+	list_splice_tail_init(&worker->pending, head);
+
+	if (!list_empty(prio_head))
+		cur = prio_head->next;
+	else if (!list_empty(head))
+		cur = head->next;
+	spin_unlock_irq(&worker->lock);
+
+	if (!cur)
+		goto out_fail;
+
+out:
+	work = list_entry(cur, struct btrfs_work, list);
+
+out_fail:
+	return work;
+}
+
 /*
  * main loop for servicing work items
  */
 static int worker_loop(void *arg)
 {
 	struct btrfs_worker_thread *worker = arg;
-	struct list_head *cur;
+	struct list_head head;
+	struct list_head prio_head;
 	struct btrfs_work *work;
+
+	INIT_LIST_HEAD(&head);
+	INIT_LIST_HEAD(&prio_head);
+
 	do {
-		spin_lock_irq(&worker->lock);
-again_locked:
+again:
 		while (1) {
-			if (!list_empty(&worker->prio_pending))
-				cur = worker->prio_pending.next;
-			else if (!list_empty(&worker->pending))
-				cur = worker->pending.next;
-			else
+
+
+			work = get_next_work(worker, &prio_head, &head);
+			if (!work)
 				break;
 
-			work = list_entry(cur, struct btrfs_work, list);
 			list_del(&work->list);
 			clear_bit(WORK_QUEUED_BIT, &work->flags);
 
 			work->worker = worker;
-			spin_unlock_irq(&worker->lock);
 
 			work->func(work);
 
@@ -233,9 +275,11 @@ again_locked:
 
 			check_pending_worker_creates(worker);
 
-			spin_lock_irq(&worker->lock);
-			check_idle_worker(worker);
 		}
+
+		spin_lock_irq(&worker->lock);
+		check_idle_worker(worker);
+
 		if (freezing(current)) {
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
@@ -274,8 +318,10 @@ again_locked:
 		spin_lock_irq(&worker->lock);
 		set_current_state(TASK_INTERRUPTIBLE);
 		if (!list_empty(&worker->pending) ||
-		    !list_empty(&worker->prio_pending))
-			goto again_locked;
+		    !list_empty(&worker->prio_pending)) {
+			spin_unlock_irq(&worker->lock);
+			goto again;
+		}
 
 		/*
 		 * this makes sure we get a wakeup when someone