Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 91 +++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 75 insertions(+), 16 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index eebb1d839235..835fe28b87a8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -68,7 +68,7 @@ struct workqueue_struct {
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
    threads to each one as cpus come/go. */
-static DEFINE_SPINLOCK(workqueue_lock);
+static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu;
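
This one sleeping lock replaces both the old workqueue_lock spinlock around the workqueues list and the lock_cpu_hotplug() calls: list changes and CPU stability are now covered together. A mutex matters because the paths it guards may sleep; a rough sketch of the pattern this enables (not patch text; wq, cpu and p stand in for the real locals):

	mutex_lock(&workqueue_mutex);
	list_for_each_entry(wq, &workqueues, list) {
		p = create_workqueue_thread(wq, cpu);	/* may sleep */
		if (p)
			wake_up_process(p);
	}
	mutex_unlock(&workqueue_mutex);

Sleeping here would have been illegal under the old spinlock.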
@@ -93,9 +93,12 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 	spin_unlock_irqrestore(&cwq->lock, flags);
 }
 
-/*
- * Queue work on a workqueue. Return non-zero if it was successfully
- * added.
+/**
+ * queue_work - queue work on a workqueue
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns non-zero if it was successfully added.
  *
  * We queue the work to the CPU it was submitted, but there is no
  * guarantee that it will be processed by that CPU.
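
For illustration only, a minimal caller under the three-argument INIT_WORK() API of this kernel (my_wq, my_work and my_handler are hypothetical names):

static struct workqueue_struct *my_wq;	/* e.g. from create_workqueue("my_wq") */
static struct work_struct my_work;

static void my_handler(void *data)
{
	/* runs later in process context, typically on the queueing CPU */
}

static int my_submit(void *data)
{
	INIT_WORK(&my_work, my_handler, data);
	return queue_work(my_wq, &my_work);	/* 0 if already pending */
}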
@@ -128,6 +131,14 @@ static void delayed_work_timer_fn(unsigned long __data)
 	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
+/**
+ * queue_delayed_work - queue work on a workqueue after delay
+ * @wq: workqueue to use
+ * @work: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Returns non-zero if it was successfully added.
+ */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
 			struct work_struct *work, unsigned long delay)
 {
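
Continuing the hypothetical my_wq/my_work sketch from above, a delayed submission might look like:

static void my_submit_in_a_second(void)
{
	/* fires after roughly HZ jiffies, i.e. about one second */
	if (!queue_delayed_work(my_wq, &my_work, HZ))
		printk(KERN_DEBUG "my_work was already pending\n");
}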
@@ -150,6 +161,15 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 }
 EXPORT_SYMBOL_GPL(queue_delayed_work);
 
+/**
+ * queue_delayed_work_on - queue work on specific CPU after delay
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @work: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Returns non-zero if it was successfully added.
+ */
 int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
 			struct work_struct *work, unsigned long delay)
 {
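
And the CPU-pinned variant, again with the hypothetical names:

static void my_submit_on_cpu(int cpu)
{
	/* as above, but the work runs on @cpu's worker thread */
	queue_delayed_work_on(cpu, my_wq, &my_work, HZ);
}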
@@ -275,8 +295,9 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 	}
 }
 
-/*
+/**
  * flush_workqueue - ensure that any scheduled work has run to completion.
+ * @wq: workqueue to flush
  *
  * Forces execution of the workqueue and blocks until its completion.
  * This is typically used in driver shutdown handlers.
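
A sketch of the shutdown ordering this comment describes (my_stop_new_work() is a hypothetical stand-in for whatever prevents further submissions):

static void my_shutdown(void)
{
	my_stop_new_work();	/* no new queue_work() calls after this */
	flush_workqueue(my_wq);	/* blocks until all queued work has run */
}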
@@ -299,10 +320,10 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 	} else {
 		int cpu;
 
-		lock_cpu_hotplug();
+		mutex_lock(&workqueue_mutex);
 		for_each_online_cpu(cpu)
 			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-		unlock_cpu_hotplug();
+		mutex_unlock(&workqueue_mutex);
 	}
 }
 EXPORT_SYMBOL_GPL(flush_workqueue);
@@ -350,8 +371,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 
 	wq->name = name;
-	/* We don't need the distraction of CPUs appearing and vanishing. */
-	lock_cpu_hotplug();
+	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
 		p = create_workqueue_thread(wq, singlethread_cpu);
@@ -360,9 +380,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 		else
 			wake_up_process(p);
 	} else {
-		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
-		spin_unlock(&workqueue_lock);
 		for_each_online_cpu(cpu) {
 			p = create_workqueue_thread(wq, cpu);
 			if (p) {
@@ -372,7 +390,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 				destroy = 1;
 		}
 	}
-	unlock_cpu_hotplug();
+	mutex_unlock(&workqueue_mutex);
 
 	/*
 	 * Was there any error during startup? If yes then clean up:
@@ -400,6 +418,12 @@ static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
 	kthread_stop(p);
 }
 
+/**
+ * destroy_workqueue - safely terminate a workqueue
+ * @wq: target workqueue
+ *
+ * Safely destroy a workqueue. All work currently pending will be done first.
+ */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
 	int cpu;
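
For illustration, the create/destroy pairing this documents, using the hypothetical my_wq from earlier:

static int __init my_module_init(void)
{
	my_wq = create_workqueue("my_wq");	/* one worker thread per CPU */
	return my_wq ? 0 : -ENOMEM;
}

static void __exit my_module_exit(void)
{
	destroy_workqueue(my_wq);	/* pending work runs before threads exit */
}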
@@ -407,17 +431,15 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	flush_workqueue(wq);
 
 	/* We don't need the distraction of CPUs appearing and vanishing. */
-	lock_cpu_hotplug();
+	mutex_lock(&workqueue_mutex);
 	if (is_single_threaded(wq))
 		cleanup_workqueue_thread(wq, singlethread_cpu);
 	else {
 		for_each_online_cpu(cpu)
 			cleanup_workqueue_thread(wq, cpu);
-		spin_lock(&workqueue_lock);
 		list_del(&wq->list);
-		spin_unlock(&workqueue_lock);
 	}
-	unlock_cpu_hotplug();
+	mutex_unlock(&workqueue_mutex);
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
 }
@@ -425,18 +447,41 @@ EXPORT_SYMBOL_GPL(destroy_workqueue);
 
 static struct workqueue_struct *keventd_wq;
 
+/**
+ * schedule_work - put work task in global workqueue
+ * @work: job to be done
+ *
+ * This puts a job in the kernel-global workqueue.
+ */
 int fastcall schedule_work(struct work_struct *work)
 {
 	return queue_work(keventd_wq, work);
 }
 EXPORT_SYMBOL(schedule_work);
 
+/**
+ * schedule_delayed_work - put work task in global workqueue after delay
+ * @work: job to be done
+ * @delay: number of jiffies to wait
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ */
 int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
 {
 	return queue_delayed_work(keventd_wq, work, delay);
 }
 EXPORT_SYMBOL(schedule_delayed_work);
 
+/**
+ * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
+ * @cpu: cpu to use
+ * @work: job to be done
+ * @delay: number of jiffies to wait
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue on the specified CPU.
+ */
 int schedule_delayed_work_on(int cpu,
 			struct work_struct *work, unsigned long delay)
 {
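
For illustration, the keventd-backed convenience calls; my_handler and the work items are hypothetical, and DECLARE_WORK() takes func and data in this era:

static DECLARE_WORK(my_event, my_handler, NULL);
static DECLARE_WORK(my_late_event, my_handler, NULL);

static void my_kick(void)
{
	schedule_work(&my_event);			/* soon, on keventd */
	schedule_delayed_work(&my_late_event, 5 * HZ);	/* in ~5 seconds */
}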
@@ -465,11 +510,13 @@ int schedule_on_each_cpu(void (*func)(void *info), void *info)
 	if (!works)
 		return -ENOMEM;
 
+	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
 		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
 				per_cpu_ptr(works, cpu));
 	}
+	mutex_unlock(&workqueue_mutex);
 	flush_workqueue(keventd_wq);
 	free_percpu(works);
 	return 0;
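
A hypothetical caller of schedule_on_each_cpu(); it blocks (it flushes keventd), so it must run in process context:

static void my_percpu_probe(void *info)
{
	/* executes once on every online CPU, in keventd context */
}

static int my_probe_all_cpus(void)
{
	return schedule_on_each_cpu(my_percpu_probe, NULL);	/* 0 or -ENOMEM */
}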
@@ -585,6 +632,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
+		mutex_lock(&workqueue_mutex);
 		/* Create a new workqueue thread for it. */
 		list_for_each_entry(wq, &workqueues, list) {
 			if (!create_workqueue_thread(wq, hotcpu)) {
@@ -603,6 +651,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 			kthread_bind(cwq->thread, hotcpu);
 			wake_up_process(cwq->thread);
 		}
+		mutex_unlock(&workqueue_mutex);
 		break;
 
 	case CPU_UP_CANCELED:
@@ -614,6 +663,15 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 				any_online_cpu(cpu_online_map));
 			cleanup_workqueue_thread(wq, hotcpu);
 		}
+		mutex_unlock(&workqueue_mutex);
+		break;
+
+	case CPU_DOWN_PREPARE:
+		mutex_lock(&workqueue_mutex);
+		break;
+
+	case CPU_DOWN_FAILED:
+		mutex_unlock(&workqueue_mutex);
 		break;
 
 	case CPU_DEAD:
@@ -621,6 +679,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		cleanup_workqueue_thread(wq, hotcpu);
 		list_for_each_entry(wq, &workqueues, list)
 			take_over_work(wq, hotcpu);
+		mutex_unlock(&workqueue_mutex);
 		break;
 	}
 
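The notifier cases pair up across calls: the mutex taken at CPU_UP_PREPARE is dropped at CPU_ONLINE or CPU_UP_CANCELED, and the one taken at CPU_DOWN_PREPARE is dropped at CPU_DOWN_FAILED or CPU_DEAD, so queueing and flushing are excluded for the whole hotplug transition. Reduced to its locking skeleton (a sketch, not the patch text):

switch (action) {
case CPU_UP_PREPARE:
case CPU_DOWN_PREPARE:
	mutex_lock(&workqueue_mutex);	/* held across the transition */
	break;
case CPU_ONLINE:
case CPU_UP_CANCELED:
case CPU_DOWN_FAILED:
case CPU_DEAD:
	mutex_unlock(&workqueue_mutex);
	break;
}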