Diffstat (limited to 'fs/btrfs/async-thread.c')
 fs/btrfs/async-thread.c | 329 ++++++++++++++++++++++++++++++++++++----------
 1 file changed, 272 insertions(+), 57 deletions(-)
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 019e8af449ab..c0861e781cdb 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -48,6 +48,9 @@ struct btrfs_worker_thread {
 	/* number of things on the pending list */
 	atomic_t num_pending;
 
+	/* reference counter for this struct */
+	atomic_t refs;
+
 	unsigned long sequence;
 
 	/* protects the pending list. */
@@ -61,6 +64,51 @@ struct btrfs_worker_thread {
 };
 
 /*
+ * btrfs_start_workers uses kthread_run, which can block waiting for memory
+ * for a very long time.  It will actually throttle on page writeback,
+ * and so it may not make progress until after our btrfs worker threads
+ * process all of the pending work structs in their queue.
+ *
+ * This means we can't use btrfs_start_workers from inside a btrfs worker
+ * thread that is used as part of cleaning dirty memory, which pretty much
+ * involves all of the worker threads.
+ *
+ * Instead we have a helper queue that never has more than one thread,
+ * where we schedule thread start operations.  This worker_start struct
+ * is used to contain the work and hold a pointer to the queue that needs
+ * another worker.
+ */
+struct worker_start {
+	struct btrfs_work work;
+	struct btrfs_workers *queue;
+};
+
+static void start_new_worker_func(struct btrfs_work *work)
+{
+	struct worker_start *start;
+	start = container_of(work, struct worker_start, work);
+	btrfs_start_workers(start->queue, 1);
+	kfree(start);
+}
+
+static int start_new_worker(struct btrfs_workers *queue)
+{
+	struct worker_start *start;
+	int ret;
+
+	start = kzalloc(sizeof(*start), GFP_NOFS);
+	if (!start)
+		return -ENOMEM;
+
+	start->work.func = start_new_worker_func;
+	start->queue = queue;
+	ret = btrfs_queue_worker(queue->atomic_worker_start, &start->work);
+	if (ret)
+		kfree(start);
+	return ret;
+}
+
+/*
  * helper function to move a thread onto the idle list after it
  * has finished some requests.
  */
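The worker_start helper above relies on the common kernel idiom of embedding a struct btrfs_work inside a private struct and recovering the container with container_of() in the callback. The following is a minimal user-space sketch of that idiom only; the my_work and my_request names are invented for illustration and are not part of the patch.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* stand-in for struct btrfs_work: just a callback pointer */
struct my_work {
	void (*func)(struct my_work *work);
};

/* stand-in for struct worker_start: private data wrapped around the work item */
struct my_request {
	struct my_work work;
	const char *queue_name;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static void request_func(struct my_work *work)
{
	/* recover the enclosing request from the embedded work item */
	struct my_request *req = container_of(work, struct my_request, work);

	printf("starting a worker for queue %s\n", req->queue_name);
	free(req);
}

int main(void)
{
	struct my_request *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	req->work.func = request_func;
	req->queue_name = "btrfs-worker";

	/* a real queue would run this callback from a helper thread */
	req->work.func(&req->work);
	return 0;
}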
@@ -71,7 +119,12 @@ static void check_idle_worker(struct btrfs_worker_thread *worker)
 		unsigned long flags;
 		spin_lock_irqsave(&worker->workers->lock, flags);
 		worker->idle = 1;
-		list_move(&worker->worker_list, &worker->workers->idle_list);
+
+		/* the list may be empty if the worker is just starting */
+		if (!list_empty(&worker->worker_list)) {
+			list_move(&worker->worker_list,
+				  &worker->workers->idle_list);
+		}
 		spin_unlock_irqrestore(&worker->workers->lock, flags);
 	}
 }
@@ -87,23 +140,51 @@ static void check_busy_worker(struct btrfs_worker_thread *worker)
 		unsigned long flags;
 		spin_lock_irqsave(&worker->workers->lock, flags);
 		worker->idle = 0;
-		list_move_tail(&worker->worker_list,
-			       &worker->workers->worker_list);
+
+		if (!list_empty(&worker->worker_list)) {
+			list_move_tail(&worker->worker_list,
+				      &worker->workers->worker_list);
+		}
 		spin_unlock_irqrestore(&worker->workers->lock, flags);
 	}
 }
 
-static noinline int run_ordered_completions(struct btrfs_workers *workers,
-					    struct btrfs_work *work)
+static void check_pending_worker_creates(struct btrfs_worker_thread *worker)
 {
+	struct btrfs_workers *workers = worker->workers;
 	unsigned long flags;
 
+	rmb();
+	if (!workers->atomic_start_pending)
+		return;
+
+	spin_lock_irqsave(&workers->lock, flags);
+	if (!workers->atomic_start_pending)
+		goto out;
+
+	workers->atomic_start_pending = 0;
+	if (workers->num_workers + workers->num_workers_starting >=
+	    workers->max_workers)
+		goto out;
+
+	workers->num_workers_starting += 1;
+	spin_unlock_irqrestore(&workers->lock, flags);
+	start_new_worker(workers);
+	return;
+
+out:
+	spin_unlock_irqrestore(&workers->lock, flags);
+}
+
+static noinline int run_ordered_completions(struct btrfs_workers *workers,
+					    struct btrfs_work *work)
+{
 	if (!workers->ordered)
 		return 0;
 
 	set_bit(WORK_DONE_BIT, &work->flags);
 
-	spin_lock_irqsave(&workers->lock, flags);
+	spin_lock(&workers->order_lock);
 
 	while (1) {
 		if (!list_empty(&workers->prio_order_list)) {
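check_pending_worker_creates() does a cheap unlocked peek at atomic_start_pending and then re-checks it under workers->lock before clearing it, so only one worker ends up scheduling a new thread start. The sketch below is a user-space illustration of that check/lock/re-check pattern with a pthread mutex standing in for the spinlock; all names and numbers here are made up for the example.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int start_pending;	/* set by a queueing path that found every worker busy */
static int num_workers = 4;
static int num_starting;
static const int max_workers = 8;

static void start_new_worker(void)
{
	/* stands in for handing a worker_start item to the helper queue */
	printf("scheduling one more worker start\n");
}

static void check_pending_creates(void)
{
	/* unlocked peek: the common case is "nothing to do" */
	if (!start_pending)
		return;

	pthread_mutex_lock(&lock);
	/* re-check under the lock; another thread may have won the race */
	if (!start_pending)
		goto out;

	start_pending = 0;
	if (num_workers + num_starting >= max_workers)
		goto out;

	num_starting++;
	pthread_mutex_unlock(&lock);
	start_new_worker();	/* may block, so never called with the lock held */
	return;

out:
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	start_pending = 1;
	check_pending_creates();
	check_pending_creates();	/* second call sees nothing pending and returns early */
	return 0;
}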
@@ -126,45 +207,118 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
 		if (test_and_set_bit(WORK_ORDER_DONE_BIT, &work->flags))
 			break;
 
-		spin_unlock_irqrestore(&workers->lock, flags);
+		spin_unlock(&workers->order_lock);
 
 		work->ordered_func(work);
 
 		/* now take the lock again and call the freeing code */
-		spin_lock_irqsave(&workers->lock, flags);
+		spin_lock(&workers->order_lock);
 		list_del(&work->order_list);
 		work->ordered_free(work);
 	}
 
-	spin_unlock_irqrestore(&workers->lock, flags);
+	spin_unlock(&workers->order_lock);
 	return 0;
 }
 
+static void put_worker(struct btrfs_worker_thread *worker)
+{
+	if (atomic_dec_and_test(&worker->refs))
+		kfree(worker);
+}
+
+static int try_worker_shutdown(struct btrfs_worker_thread *worker)
+{
+	int freeit = 0;
+
+	spin_lock_irq(&worker->lock);
+	spin_lock(&worker->workers->lock);
+	if (worker->workers->num_workers > 1 &&
+	    worker->idle &&
+	    !worker->working &&
+	    !list_empty(&worker->worker_list) &&
+	    list_empty(&worker->prio_pending) &&
+	    list_empty(&worker->pending) &&
+	    atomic_read(&worker->num_pending) == 0) {
+		freeit = 1;
+		list_del_init(&worker->worker_list);
+		worker->workers->num_workers--;
+	}
+	spin_unlock(&worker->workers->lock);
+	spin_unlock_irq(&worker->lock);
+
+	if (freeit)
+		put_worker(worker);
+	return freeit;
+}
+
+static struct btrfs_work *get_next_work(struct btrfs_worker_thread *worker,
+					struct list_head *prio_head,
+					struct list_head *head)
+{
+	struct btrfs_work *work = NULL;
+	struct list_head *cur = NULL;
+
+	if (!list_empty(prio_head))
+		cur = prio_head->next;
+
+	smp_mb();
+	if (!list_empty(&worker->prio_pending))
+		goto refill;
+
+	if (!list_empty(head))
+		cur = head->next;
+
+	if (cur)
+		goto out;
+
+refill:
+	spin_lock_irq(&worker->lock);
+	list_splice_tail_init(&worker->prio_pending, prio_head);
+	list_splice_tail_init(&worker->pending, head);
+
+	if (!list_empty(prio_head))
+		cur = prio_head->next;
+	else if (!list_empty(head))
+		cur = head->next;
+	spin_unlock_irq(&worker->lock);
+
+	if (!cur)
+		goto out_fail;
+
+out:
+	work = list_entry(cur, struct btrfs_work, list);
+
+out_fail:
+	return work;
+}
+
 /*
  * main loop for servicing work items
  */
 static int worker_loop(void *arg)
 {
 	struct btrfs_worker_thread *worker = arg;
-	struct list_head *cur;
+	struct list_head head;
+	struct list_head prio_head;
 	struct btrfs_work *work;
+
+	INIT_LIST_HEAD(&head);
+	INIT_LIST_HEAD(&prio_head);
+
 	do {
-		spin_lock_irq(&worker->lock);
-again_locked:
+again:
 		while (1) {
-			if (!list_empty(&worker->prio_pending))
-				cur = worker->prio_pending.next;
-			else if (!list_empty(&worker->pending))
-				cur = worker->pending.next;
-			else
+
+
+			work = get_next_work(worker, &prio_head, &head);
+			if (!work)
 				break;
 
-			work = list_entry(cur, struct btrfs_work, list);
 			list_del(&work->list);
 			clear_bit(WORK_QUEUED_BIT, &work->flags);
 
 			work->worker = worker;
-			spin_unlock_irq(&worker->lock);
 
 			work->func(work);
 
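get_next_work() lets each worker consume work in batches: it drains worker->prio_pending and worker->pending into thread-local list heads with list_splice_tail_init(), so worker->lock is taken once per refill instead of once per work item. The following is a user-space sketch of that splice-and-drain idea using a tiny singly linked queue instead of the kernel's list_head; every name here is illustrative.

#include <pthread.h>
#include <stdio.h>

struct item {
	int id;
	struct item *next;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *shared_head;	/* producers push here under the lock */

/* take the whole shared queue with a single lock round trip */
static struct item *refill_local(void)
{
	struct item *batch;

	pthread_mutex_lock(&lock);
	batch = shared_head;
	shared_head = NULL;
	pthread_mutex_unlock(&lock);
	return batch;
}

static void drain(void)
{
	struct item *local = refill_local();

	/* consume the batch without touching the shared lock again */
	while (local) {
		struct item *work = local;

		local = local->next;
		printf("processing item %d\n", work->id);
	}
}

int main(void)
{
	static struct item items[3] = { { .id = 0 }, { .id = 1 }, { .id = 2 } };

	pthread_mutex_lock(&lock);
	for (int i = 0; i < 3; i++) {	/* LIFO push for brevity; the kernel lists keep FIFO order */
		items[i].next = shared_head;
		shared_head = &items[i];
	}
	pthread_mutex_unlock(&lock);

	drain();
	return 0;
}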
@@ -175,9 +329,13 @@ again_locked:
 			 */
 			run_ordered_completions(worker->workers, work);
 
-			spin_lock_irq(&worker->lock);
-			check_idle_worker(worker);
+			check_pending_worker_creates(worker);
+
 		}
+
+		spin_lock_irq(&worker->lock);
+		check_idle_worker(worker);
+
 		if (freezing(current)) {
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
@@ -216,8 +374,10 @@ again_locked:
 			spin_lock_irq(&worker->lock);
 			set_current_state(TASK_INTERRUPTIBLE);
 			if (!list_empty(&worker->pending) ||
-			    !list_empty(&worker->prio_pending))
-				goto again_locked;
+			    !list_empty(&worker->prio_pending)) {
+				spin_unlock_irq(&worker->lock);
+				goto again;
+			}
 
 			/*
 			 * this makes sure we get a wakeup when someone
@@ -226,8 +386,13 @@ again_locked:
 			worker->working = 0;
 			spin_unlock_irq(&worker->lock);
 
-			if (!kthread_should_stop())
-				schedule();
+			if (!kthread_should_stop()) {
+				schedule_timeout(HZ * 120);
+				if (!worker->working &&
+				    try_worker_shutdown(worker)) {
+					return 0;
+				}
+			}
 		}
 		__set_current_state(TASK_RUNNING);
 	}
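The idle-exit path only works because the worker struct is now reference counted: try_worker_shutdown() unhooks the thread from the list and drops the list's reference, while any other holder keeps the struct alive until its own put_worker(). Below is a user-space sketch of that get/put lifetime rule using C11 atomics; the worker type and ids are invented for the example.

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct worker {
	atomic_int refs;
	int id;
};

static struct worker *get_worker(struct worker *w)
{
	atomic_fetch_add(&w->refs, 1);
	return w;
}

static void put_worker(struct worker *w)
{
	/* free only when the last reference is dropped */
	if (atomic_fetch_sub(&w->refs, 1) == 1) {
		printf("freeing worker %d\n", w->id);
		free(w);
	}
}

int main(void)
{
	struct worker *w = malloc(sizeof(*w));

	if (!w)
		return 1;
	atomic_init(&w->refs, 1);	/* the worker list owns the initial reference */
	w->id = 0;

	get_worker(w);	/* e.g. someone about to stop the thread */
	put_worker(w);	/* the list drops its reference at shutdown */
	put_worker(w);	/* last put actually frees the struct */
	return 0;
}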
@@ -242,41 +407,61 @@ int btrfs_stop_workers(struct btrfs_workers *workers)
 {
 	struct list_head *cur;
 	struct btrfs_worker_thread *worker;
+	int can_stop;
 
+	spin_lock_irq(&workers->lock);
 	list_splice_init(&workers->idle_list, &workers->worker_list);
 	while (!list_empty(&workers->worker_list)) {
 		cur = workers->worker_list.next;
 		worker = list_entry(cur, struct btrfs_worker_thread,
 				    worker_list);
-		kthread_stop(worker->task);
-		list_del(&worker->worker_list);
-		kfree(worker);
+
+		atomic_inc(&worker->refs);
+		workers->num_workers -= 1;
+		if (!list_empty(&worker->worker_list)) {
+			list_del_init(&worker->worker_list);
+			put_worker(worker);
+			can_stop = 1;
+		} else
+			can_stop = 0;
+		spin_unlock_irq(&workers->lock);
+		if (can_stop)
+			kthread_stop(worker->task);
+		spin_lock_irq(&workers->lock);
+		put_worker(worker);
 	}
+	spin_unlock_irq(&workers->lock);
 	return 0;
 }
 
 /*
  * simple init on struct btrfs_workers
  */
-void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max)
+void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
+			struct btrfs_workers *async_helper)
 {
 	workers->num_workers = 0;
+	workers->num_workers_starting = 0;
 	INIT_LIST_HEAD(&workers->worker_list);
 	INIT_LIST_HEAD(&workers->idle_list);
 	INIT_LIST_HEAD(&workers->order_list);
 	INIT_LIST_HEAD(&workers->prio_order_list);
 	spin_lock_init(&workers->lock);
+	spin_lock_init(&workers->order_lock);
 	workers->max_workers = max;
 	workers->idle_thresh = 32;
 	workers->name = name;
 	workers->ordered = 0;
+	workers->atomic_start_pending = 0;
+	workers->atomic_worker_start = async_helper;
 }
 
 /*
  * starts new worker threads.  This does not enforce the max worker
  * count in case you need to temporarily go past it.
  */
-int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+static int __btrfs_start_workers(struct btrfs_workers *workers,
+				 int num_workers)
 {
 	struct btrfs_worker_thread *worker;
 	int ret = 0;
@@ -293,7 +478,9 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 		INIT_LIST_HEAD(&worker->prio_pending);
 		INIT_LIST_HEAD(&worker->worker_list);
 		spin_lock_init(&worker->lock);
+
 		atomic_set(&worker->num_pending, 0);
+		atomic_set(&worker->refs, 1);
 		worker->workers = workers;
 		worker->task = kthread_run(worker_loop, worker,
 					   "btrfs-%s-%d", workers->name,
@@ -303,11 +490,12 @@ int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
 			kfree(worker);
 			goto fail;
 		}
-
 		spin_lock_irq(&workers->lock);
 		list_add_tail(&worker->worker_list, &workers->idle_list);
 		worker->idle = 1;
 		workers->num_workers++;
+		workers->num_workers_starting--;
+		WARN_ON(workers->num_workers_starting < 0);
 		spin_unlock_irq(&workers->lock);
 	}
 	return 0;
@@ -316,6 +504,14 @@ fail:
 	return ret;
 }
 
+int btrfs_start_workers(struct btrfs_workers *workers, int num_workers)
+{
+	spin_lock_irq(&workers->lock);
+	workers->num_workers_starting += num_workers;
+	spin_unlock_irq(&workers->lock);
+	return __btrfs_start_workers(workers, num_workers);
+}
+
 /*
  * run through the list and find a worker thread that doesn't have a lot
  * to do right now.  This can return null if we aren't yet at the thread
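Counting threads that are still starting separately from threads that are already running keeps concurrent callers from overshooting max_workers: a slot is reserved under the lock before the (possibly slow) thread creation and converted into num_workers once the thread is up. The sketch below shows that reserve-then-commit accounting in user space, with a pthread mutex in place of the spinlock and invented names.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int num_workers;
static int num_workers_starting;
static const int max_workers = 8;

/* reserve a slot before the slow creation step; return 0 if at the limit */
static int reserve_worker_slot(void)
{
	int ok = 0;

	pthread_mutex_lock(&lock);
	if (num_workers + num_workers_starting < max_workers) {
		num_workers_starting++;
		ok = 1;
	}
	pthread_mutex_unlock(&lock);
	return ok;
}

/* called once the new thread is actually running */
static void commit_worker_slot(void)
{
	pthread_mutex_lock(&lock);
	num_workers_starting--;
	num_workers++;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	if (reserve_worker_slot()) {
		/* the slow work (kthread_run in the kernel) happens without the lock */
		commit_worker_slot();
	}
	printf("workers: %d running, %d starting\n",
	       num_workers, num_workers_starting);
	return 0;
}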
@@ -325,7 +521,10 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	struct list_head *next;
-	int enforce_min = workers->num_workers < workers->max_workers;
+	int enforce_min;
+
+	enforce_min = (workers->num_workers + workers->num_workers_starting) <
+		workers->max_workers;
 
 	/*
 	 * if we find an idle thread, don't move it to the end of the
@@ -350,7 +549,6 @@ static struct btrfs_worker_thread *next_worker(struct btrfs_workers *workers)
 	 */
 	next = workers->worker_list.next;
 	worker = list_entry(next, struct btrfs_worker_thread, worker_list);
-	atomic_inc(&worker->num_pending);
 	worker->sequence++;
 
 	if (worker->sequence % workers->idle_thresh == 0)
@@ -367,35 +565,49 @@ static struct btrfs_worker_thread *find_worker(struct btrfs_workers *workers)
 {
 	struct btrfs_worker_thread *worker;
 	unsigned long flags;
+	struct list_head *fallback;
 
 again:
 	spin_lock_irqsave(&workers->lock, flags);
 	worker = next_worker(workers);
-	spin_unlock_irqrestore(&workers->lock, flags);
 
 	if (!worker) {
-		spin_lock_irqsave(&workers->lock, flags);
-		if (workers->num_workers >= workers->max_workers) {
-			struct list_head *fallback = NULL;
-			/*
-			 * we have failed to find any workers, just
-			 * return the force one
-			 */
-			if (!list_empty(&workers->worker_list))
-				fallback = workers->worker_list.next;
-			if (!list_empty(&workers->idle_list))
-				fallback = workers->idle_list.next;
-			BUG_ON(!fallback);
-			worker = list_entry(fallback,
-				       struct btrfs_worker_thread, worker_list);
-			spin_unlock_irqrestore(&workers->lock, flags);
+		if (workers->num_workers + workers->num_workers_starting >=
+		    workers->max_workers) {
+			goto fallback;
+		} else if (workers->atomic_worker_start) {
+			workers->atomic_start_pending = 1;
+			goto fallback;
 		} else {
+			workers->num_workers_starting++;
 			spin_unlock_irqrestore(&workers->lock, flags);
 			/* we're below the limit, start another worker */
-			btrfs_start_workers(workers, 1);
+			__btrfs_start_workers(workers, 1);
 			goto again;
 		}
 	}
+	goto found;
+
+fallback:
+	fallback = NULL;
+	/*
+	 * we have failed to find any workers, just
+	 * return the first one we can find.
+	 */
+	if (!list_empty(&workers->worker_list))
+		fallback = workers->worker_list.next;
+	if (!list_empty(&workers->idle_list))
+		fallback = workers->idle_list.next;
+	BUG_ON(!fallback);
+	worker = list_entry(fallback,
+		  struct btrfs_worker_thread, worker_list);
+found:
+	/*
+	 * this makes sure the worker doesn't exit before it is placed
+	 * onto a busy/idle list
+	 */
+	atomic_inc(&worker->num_pending);
+	spin_unlock_irqrestore(&workers->lock, flags);
 	return worker;
 }
 
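The comment at the end of find_worker() captures the pinning rule: bump num_pending while still holding workers->lock, so the chosen thread can never pass the atomic_read(&worker->num_pending) == 0 test in try_worker_shutdown() and free itself before the caller has queued onto it. Here is a compact user-space sketch of the same rule against a deliberately simplified shutdown check; the names and the single static worker are invented for illustration.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct worker {
	atomic_int num_pending;
	int alive;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct worker the_worker = { .alive = 1 };

/* pick a worker and pin it before the lock is dropped */
static struct worker *pick_worker(void)
{
	struct worker *w;

	pthread_mutex_lock(&lock);
	w = &the_worker;
	atomic_fetch_add(&w->num_pending, 1);	/* pin: shutdown now refuses */
	pthread_mutex_unlock(&lock);
	return w;
}

/* idle worker trying to exit; bails out if anything is pending */
static int try_shutdown(struct worker *w)
{
	int freed = 0;

	pthread_mutex_lock(&lock);
	if (atomic_load(&w->num_pending) == 0 && w->alive) {
		w->alive = 0;
		freed = 1;
	}
	pthread_mutex_unlock(&lock);
	return freed;
}

int main(void)
{
	struct worker *w = pick_worker();

	printf("shutdown while pinned: %d\n", try_shutdown(w));	/* prints 0 */
	atomic_fetch_sub(&w->num_pending, 1);	/* the queued work has completed */
	printf("shutdown when idle: %d\n", try_shutdown(w));	/* prints 1 */
	return 0;
}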
@@ -427,7 +639,7 @@ int btrfs_requeue_work(struct btrfs_work *work)
 		spin_lock(&worker->workers->lock);
 		worker->idle = 0;
 		list_move_tail(&worker->worker_list,
-			       &worker->workers->worker_list);
+			      &worker->workers->worker_list);
 		spin_unlock(&worker->workers->lock);
 	}
 	if (!worker->working) {
@@ -435,9 +647,9 @@ int btrfs_requeue_work(struct btrfs_work *work)
 		worker->working = 1;
 	}
 
-	spin_unlock_irqrestore(&worker->lock, flags);
 	if (wake)
 		wake_up_process(worker->task);
+	spin_unlock_irqrestore(&worker->lock, flags);
 out:
 
 	return 0;
@@ -463,14 +675,18 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 
 	worker = find_worker(workers);
 	if (workers->ordered) {
-		spin_lock_irqsave(&workers->lock, flags);
+		/*
+		 * you're not allowed to do ordered queues from an
+		 * interrupt handler
+		 */
+		spin_lock(&workers->order_lock);
 		if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags)) {
 			list_add_tail(&work->order_list,
 				      &workers->prio_order_list);
 		} else {
 			list_add_tail(&work->order_list, &workers->order_list);
 		}
-		spin_unlock_irqrestore(&workers->lock, flags);
+		spin_unlock(&workers->order_lock);
 	} else {
 		INIT_LIST_HEAD(&work->order_list);
 	}
@@ -481,7 +697,6 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 		list_add_tail(&work->list, &worker->prio_pending);
 	else
 		list_add_tail(&work->list, &worker->pending);
-	atomic_inc(&worker->num_pending);
 	check_busy_worker(worker);
 
 	/*
@@ -492,10 +707,10 @@ int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 		wake = 1;
 	worker->working = 1;
 
-	spin_unlock_irqrestore(&worker->lock, flags);
-
 	if (wake)
 		wake_up_process(worker->task);
+	spin_unlock_irqrestore(&worker->lock, flags);
+
 out:
 
 	return 0;