Diffstat (limited to 'kernel/workqueue.c')
 -rw-r--r--  kernel/workqueue.c | 182
 1 file changed, 131 insertions(+), 51 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 740c5abceb07..835fe28b87a8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -51,7 +51,7 @@ struct cpu_workqueue_struct {
 	wait_queue_head_t work_done;
 
 	struct workqueue_struct *wq;
-	task_t *thread;
+	struct task_struct *thread;
 
 	int run_depth;		/* Detect run_workqueue() recursion depth */
 } ____cacheline_aligned;
@@ -68,7 +68,7 @@ struct workqueue_struct {
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
    threads to each one as cpus come/go. */
-static DEFINE_SPINLOCK(workqueue_lock);
+static DEFINE_MUTEX(workqueue_mutex);
 static LIST_HEAD(workqueues);
 
 static int singlethread_cpu;
@@ -93,9 +93,12 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
 	spin_unlock_irqrestore(&cwq->lock, flags);
 }
 
-/*
- * Queue work on a workqueue. Return non-zero if it was successfully
- * added.
+/**
+ * queue_work - queue work on a workqueue
+ * @wq: workqueue to use
+ * @work: work to queue
+ *
+ * Returns non-zero if it was successfully added.
  *
  * We queue the work to the CPU it was submitted, but there is no
  * guarantee that it will be processed by that CPU.
@@ -114,6 +117,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
 	put_cpu();
 	return ret;
 }
+EXPORT_SYMBOL_GPL(queue_work);
 
 static void delayed_work_timer_fn(unsigned long __data)
 {
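For orientation, here is a minimal caller of queue_work() as it would look against this kernel generation's API (three-argument INIT_WORK(), pre-2.6.20). This is an illustrative sketch, not part of the patch; my_wq, my_work and my_handler are invented names.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;	/* hypothetical driver workqueue */
static struct work_struct my_work;

static void my_handler(void *data)
{
	/* runs later in a worker thread's process context; may sleep */
}

static int __init my_init(void)
{
	my_wq = create_workqueue("my_wq");
	if (!my_wq)
		return -ENOMEM;
	INIT_WORK(&my_work, my_handler, NULL);
	/* non-zero return: queued; zero: this work was already pending */
	queue_work(my_wq, &my_work);
	return 0;
}
module_init(my_init);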
@@ -127,6 +131,14 @@ static void delayed_work_timer_fn(unsigned long __data)
 	__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
 }
 
+/**
+ * queue_delayed_work - queue work on a workqueue after delay
+ * @wq: workqueue to use
+ * @work: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Returns non-zero if it was successfully added.
+ */
 int fastcall queue_delayed_work(struct workqueue_struct *wq,
 			struct work_struct *work, unsigned long delay)
 {
@@ -147,6 +159,38 @@ int fastcall queue_delayed_work(struct workqueue_struct *wq,
 	}
 	return ret;
 }
+EXPORT_SYMBOL_GPL(queue_delayed_work);
+
+/**
+ * queue_delayed_work_on - queue work on specific CPU after delay
+ * @cpu: CPU number to execute work on
+ * @wq: workqueue to use
+ * @work: work to queue
+ * @delay: number of jiffies to wait before queueing
+ *
+ * Returns non-zero if it was successfully added.
+ */
+int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
+			struct work_struct *work, unsigned long delay)
+{
+	int ret = 0;
+	struct timer_list *timer = &work->timer;
+
+	if (!test_and_set_bit(0, &work->pending)) {
+		BUG_ON(timer_pending(timer));
+		BUG_ON(!list_empty(&work->entry));
+
+		/* This stores wq for the moment, for the timer_fn */
+		work->wq_data = wq;
+		timer->expires = jiffies + delay;
+		timer->data = (unsigned long)work;
+		timer->function = delayed_work_timer_fn;
+		add_timer_on(timer, cpu);
+		ret = 1;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(queue_delayed_work_on);
 
 static void run_workqueue(struct cpu_workqueue_struct *cwq)
 {
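A hedged usage sketch for the new entry point: re-arm a poll handler on one fixed CPU. poll_work is assumed to have been set up with INIT_WORK() as in the earlier sketch, and the CPU number is illustrative.

static void my_poll_start(void)
{
	/* Run poll_work's handler on CPU 1, one second from now. */
	if (!queue_delayed_work_on(1, my_wq, &poll_work, HZ))
		printk(KERN_DEBUG "poll_work already pending; timer untouched\n");
}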
@@ -251,8 +295,9 @@ static void flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
 	}
 }
 
-/*
+/**
  * flush_workqueue - ensure that any scheduled work has run to completion.
+ * @wq: workqueue to flush
  *
  * Forces execution of the workqueue and blocks until its completion.
  * This is typically used in driver shutdown handlers.
@@ -275,12 +320,13 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 	} else {
 		int cpu;
 
-		lock_cpu_hotplug();
+		mutex_lock(&workqueue_mutex);
 		for_each_online_cpu(cpu)
 			flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, cpu));
-		unlock_cpu_hotplug();
+		mutex_unlock(&workqueue_mutex);
 	}
 }
+EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 						   int cpu)
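The kerneldoc above mentions driver shutdown handlers; the usual pattern is to flush before freeing anything the handlers touch. A tiny sketch with the hypothetical my_wq:

static void my_quiesce(void)
{
	/*
	 * Blocks until every work item queued on my_wq before this call
	 * has finished running, so the handlers can no longer be active
	 * when we return.
	 */
	flush_workqueue(my_wq);
}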
@@ -325,8 +371,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 
 	wq->name = name;
-	/* We don't need the distraction of CPUs appearing and vanishing. */
-	lock_cpu_hotplug();
+	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
 		p = create_workqueue_thread(wq, singlethread_cpu);
@@ -335,9 +380,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 		else
 			wake_up_process(p);
 	} else {
-		spin_lock(&workqueue_lock);
 		list_add(&wq->list, &workqueues);
-		spin_unlock(&workqueue_lock);
 		for_each_online_cpu(cpu) {
 			p = create_workqueue_thread(wq, cpu);
 			if (p) {
@@ -347,7 +390,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 				destroy = 1;
 		}
 	}
-	unlock_cpu_hotplug();
+	mutex_unlock(&workqueue_mutex);
 
 	/*
 	 * Was there any error during startup? If yes then clean up:
@@ -358,6 +401,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 	return wq;
 }
+EXPORT_SYMBOL_GPL(__create_workqueue);
 
 static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
 {
@@ -374,6 +418,12 @@ static void cleanup_workqueue_thread(struct workqueue_struct *wq, int cpu)
 		kthread_stop(p);
 }
 
+/**
+ * destroy_workqueue - safely terminate a workqueue
+ * @wq: target workqueue
+ *
+ * Safely destroy a workqueue. All work currently pending will be done first.
+ */
 void destroy_workqueue(struct workqueue_struct *wq)
 {
 	int cpu;
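Since destroy_workqueue() flushes on the way down (the flush_workqueue() call is visible in the next hunk), a driver's teardown needs only the single call. A sketch using the hypothetical my_wq from above:

static void __exit my_exit(void)
{
	/*
	 * destroy_workqueue() flushes first, so everything queued on
	 * my_wq has finished by the time the worker threads are stopped
	 * and the structure is freed.
	 */
	destroy_workqueue(my_wq);
}
module_exit(my_exit);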
@@ -381,69 +431,94 @@ void destroy_workqueue(struct workqueue_struct *wq)
 	flush_workqueue(wq);
 
 	/* We don't need the distraction of CPUs appearing and vanishing. */
-	lock_cpu_hotplug();
+	mutex_lock(&workqueue_mutex);
 	if (is_single_threaded(wq))
 		cleanup_workqueue_thread(wq, singlethread_cpu);
 	else {
 		for_each_online_cpu(cpu)
 			cleanup_workqueue_thread(wq, cpu);
-		spin_lock(&workqueue_lock);
 		list_del(&wq->list);
-		spin_unlock(&workqueue_lock);
 	}
-	unlock_cpu_hotplug();
+	mutex_unlock(&workqueue_mutex);
 	free_percpu(wq->cpu_wq);
 	kfree(wq);
 }
+EXPORT_SYMBOL_GPL(destroy_workqueue);
 
 static struct workqueue_struct *keventd_wq;
 
+/**
+ * schedule_work - put work task in global workqueue
+ * @work: job to be done
+ *
+ * This puts a job in the kernel-global workqueue.
+ */
 int fastcall schedule_work(struct work_struct *work)
 {
 	return queue_work(keventd_wq, work);
 }
+EXPORT_SYMBOL(schedule_work);
 
+/**
+ * schedule_delayed_work - put work task in global workqueue after delay
+ * @work: job to be done
+ * @delay: number of jiffies to wait
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue.
+ */
 int fastcall schedule_delayed_work(struct work_struct *work, unsigned long delay)
 {
 	return queue_delayed_work(keventd_wq, work, delay);
 }
+EXPORT_SYMBOL(schedule_delayed_work);
 
+/**
+ * schedule_delayed_work_on - queue work in global workqueue on CPU after delay
+ * @cpu: cpu to use
+ * @work: job to be done
+ * @delay: number of jiffies to wait
+ *
+ * After waiting for a given time this puts a job in the kernel-global
+ * workqueue on the specified CPU.
+ */
 int schedule_delayed_work_on(int cpu,
 			struct work_struct *work, unsigned long delay)
 {
-	int ret = 0;
-	struct timer_list *timer = &work->timer;
-
-	if (!test_and_set_bit(0, &work->pending)) {
-		BUG_ON(timer_pending(timer));
-		BUG_ON(!list_empty(&work->entry));
-		/* This stores keventd_wq for the moment, for the timer_fn */
-		work->wq_data = keventd_wq;
-		timer->expires = jiffies + delay;
-		timer->data = (unsigned long)work;
-		timer->function = delayed_work_timer_fn;
-		add_timer_on(timer, cpu);
-		ret = 1;
-	}
-	return ret;
+	return queue_delayed_work_on(cpu, keventd_wq, work, delay);
 }
+EXPORT_SYMBOL(schedule_delayed_work_on);
 
-int schedule_on_each_cpu(void (*func) (void *info), void *info)
+/**
+ * schedule_on_each_cpu - call a function on each online CPU from keventd
+ * @func: the function to call
+ * @info: a pointer to pass to func()
+ *
+ * Returns zero on success.
+ * Returns -ve errno on failure.
+ *
+ * Appears to be racy against CPU hotplug.
+ *
+ * schedule_on_each_cpu() is very slow.
+ */
+int schedule_on_each_cpu(void (*func)(void *info), void *info)
 {
 	int cpu;
-	struct work_struct *work;
-
-	work = kmalloc(NR_CPUS * sizeof(struct work_struct), GFP_KERNEL);
+	struct work_struct *works;
 
-	if (!work)
+	works = alloc_percpu(struct work_struct);
+	if (!works)
 		return -ENOMEM;
+
+	mutex_lock(&workqueue_mutex);
 	for_each_online_cpu(cpu) {
-		INIT_WORK(work + cpu, func, info);
+		INIT_WORK(per_cpu_ptr(works, cpu), func, info);
 		__queue_work(per_cpu_ptr(keventd_wq->cpu_wq, cpu),
-				work + cpu);
+				per_cpu_ptr(works, cpu));
 	}
+	mutex_unlock(&workqueue_mutex);
 	flush_workqueue(keventd_wq);
-	kfree(work);
+	free_percpu(works);
 	return 0;
 }
 
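The rewritten schedule_on_each_cpu() trades the NR_CPUS-sized kmalloc() for alloc_percpu(), so only possible CPUs pay for storage. A hypothetical caller (drain_cpu_cache is an invented name):

static void drain_cpu_cache(void *unused)
{
	/* invoked once on every online CPU, in keventd context */
}

static int drain_all_caches(void)
{
	/* 0 on success, -ENOMEM if the per-cpu allocation failed */
	return schedule_on_each_cpu(drain_cpu_cache, NULL);
}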
@@ -451,6 +526,7 @@ void flush_scheduled_work(void)
 {
 	flush_workqueue(keventd_wq);
 }
+EXPORT_SYMBOL(flush_scheduled_work);
 
 /**
  * cancel_rearming_delayed_workqueue - reliably kill off a delayed
@@ -547,7 +623,7 @@ static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 }
 
 /* We're holding the cpucontrol mutex here */
-static int workqueue_cpu_callback(struct notifier_block *nfb,
+static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 				  unsigned long action,
 				  void *hcpu)
 {
@@ -556,6 +632,7 @@ static int workqueue_cpu_callback(struct notifier_block *nfb,
 
 	switch (action) {
 	case CPU_UP_PREPARE:
+		mutex_lock(&workqueue_mutex);
 		/* Create a new workqueue thread for it. */
 		list_for_each_entry(wq, &workqueues, list) {
 			if (!create_workqueue_thread(wq, hotcpu)) {
@@ -574,15 +651,27 @@ static int workqueue_cpu_callback(struct notifier_block *nfb,
 			kthread_bind(cwq->thread, hotcpu);
 			wake_up_process(cwq->thread);
 		}
+		mutex_unlock(&workqueue_mutex);
 		break;
 
 	case CPU_UP_CANCELED:
 		list_for_each_entry(wq, &workqueues, list) {
+			if (!per_cpu_ptr(wq->cpu_wq, hotcpu)->thread)
+				continue;
 			/* Unbind so it can run. */
 			kthread_bind(per_cpu_ptr(wq->cpu_wq, hotcpu)->thread,
 				     any_online_cpu(cpu_online_map));
 			cleanup_workqueue_thread(wq, hotcpu);
 		}
+		mutex_unlock(&workqueue_mutex);
+		break;
+
+	case CPU_DOWN_PREPARE:
+		mutex_lock(&workqueue_mutex);
+		break;
+
+	case CPU_DOWN_FAILED:
+		mutex_unlock(&workqueue_mutex);
 		break;
 
 	case CPU_DEAD:
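Read together with the CPU_DEAD case in the next hunk, the notifier now brackets both hotplug directions with workqueue_mutex. A condensed summary of the pairing (annotation only, not code from the patch):

/*
 * workqueue_mutex across the hotplug notifier chain:
 *
 *   CPU_UP_PREPARE    mutex_lock()    held while the thread is created
 *   CPU_ONLINE        mutex_unlock()  after binding and waking the thread
 *   CPU_UP_CANCELED   mutex_unlock()  bring-up failed
 *   CPU_DOWN_PREPARE  mutex_lock()    taken before the CPU can vanish
 *   CPU_DOWN_FAILED   mutex_unlock()  tear-down aborted
 *   CPU_DEAD          mutex_unlock()  after take_over_work()
 */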
@@ -590,6 +679,7 @@ static int workqueue_cpu_callback(struct notifier_block *nfb,
 			cleanup_workqueue_thread(wq, hotcpu);
 		list_for_each_entry(wq, &workqueues, list)
 			take_over_work(wq, hotcpu);
+		mutex_unlock(&workqueue_mutex);
 		break;
 	}
 
@@ -605,13 +695,3 @@ void init_workqueues(void)
 	BUG_ON(!keventd_wq);
 }
 
-EXPORT_SYMBOL_GPL(__create_workqueue);
-EXPORT_SYMBOL_GPL(queue_work);
-EXPORT_SYMBOL_GPL(queue_delayed_work);
-EXPORT_SYMBOL_GPL(flush_workqueue);
-EXPORT_SYMBOL_GPL(destroy_workqueue);
-
-EXPORT_SYMBOL(schedule_work);
-EXPORT_SYMBOL(schedule_delayed_work);
-EXPORT_SYMBOL(schedule_delayed_work_on);
-EXPORT_SYMBOL(flush_scheduled_work);
