Diffstat (limited to 'kernel/workqueue.c'):

 kernel/workqueue.c | 35 +++++++++++++++++++++++++++--------
 1 file changed, 27 insertions(+), 8 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8d1e7cb8a51a..c5257316f4b9 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -29,6 +29,9 @@
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
+#include <linux/freezer.h>
+#include <linux/kallsyms.h>
+#include <linux/debug_locks.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -55,6 +58,8 @@ struct cpu_workqueue_struct {
 	struct task_struct *thread;
 
 	int run_depth;		/* Detect run_workqueue() recursion depth */
+
+	int freezeable;		/* Freeze the thread during suspend */
 } ____cacheline_aligned;
 
 /*
@@ -250,6 +255,17 @@ static void run_workqueue(struct cpu_workqueue_struct *cwq)
 			work_release(work);
 		f(work);
 
+		if (unlikely(in_atomic() || lockdep_depth(current) > 0)) {
+			printk(KERN_ERR "BUG: workqueue leaked lock or atomic: "
+					"%s/0x%08x/%d\n",
+					current->comm, preempt_count(),
+					current->pid);
+			printk(KERN_ERR "    last function: ");
+			print_symbol("%s\n", (unsigned long)f);
+			debug_show_held_locks(current);
+			dump_stack();
+		}
+
 		spin_lock_irqsave(&cwq->lock, flags);
 		cwq->remove_sequence++;
 		wake_up(&cwq->work_done);
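
The hunk above is the debugging half of this diff: after each work function f(work) returns, run_workqueue() checks that the thread is neither atomic nor still holding lockdep-tracked locks. As an illustration (not part of the patch; all names hypothetical), a work function like the following would now be reported with the "BUG: workqueue leaked lock or atomic" message, the symbol of the offending function, its held locks, and a stack trace:

/* Illustration only -- hypothetical buggy driver code, not from the patch. */
static DEFINE_SPINLOCK(mydrv_lock);

static void mydrv_bad_work(struct work_struct *work)
{
	spin_lock(&mydrv_lock);
	/* ...forgot spin_unlock(&mydrv_lock) on this path, so
	 * lockdep_depth(current) stays > 0 and the new check in
	 * run_workqueue() fires after this function returns. */
}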
@@ -265,7 +281,8 @@ static int worker_thread(void *__cwq)
 	struct k_sigaction sa;
 	sigset_t blocked;
 
-	current->flags |= PF_NOFREEZE;
+	if (!cwq->freezeable)
+		current->flags |= PF_NOFREEZE;
 
 	set_user_nice(current, -5);
 
@@ -288,6 +305,9 @@
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
+		if (cwq->freezeable)
+			try_to_freeze();
+
 		add_wait_queue(&cwq->more_work, &wait);
 		if (list_empty(&cwq->worklist))
 			schedule();
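
For context on the two hunks above: a freezeable worker skips PF_NOFREEZE and instead calls try_to_freeze() on every pass through its loop, giving the suspend code a point at which to park it. If memory serves, the helper in <linux/freezer.h> of this era amounted to the following (a paraphrased sketch, not part of this diff; check the tree for the authoritative definition):

/* Paraphrased from <linux/freezer.h> circa 2.6.20 -- illustration only. */
static inline int try_to_freeze(void)
{
	if (freezing(current)) {	/* freezer flagged this task? */
		refrigerator();		/* sleep here until resume */
		return 1;
	}
	return 0;
}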
@@ -364,7 +384,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-						   int cpu)
+						   int cpu, int freezeable)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 	struct task_struct *p;
@@ -374,6 +394,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 	cwq->thread = NULL;
 	cwq->insert_sequence = 0;
 	cwq->remove_sequence = 0;
+	cwq->freezeable = freezeable;
 	INIT_LIST_HEAD(&cwq->worklist);
 	init_waitqueue_head(&cwq->more_work);
 	init_waitqueue_head(&cwq->work_done);
@@ -389,7 +410,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 }
 
 struct workqueue_struct *__create_workqueue(const char *name,
-					    int singlethread)
+					    int singlethread, int freezeable)
 {
 	int cpu, destroy = 0;
 	struct workqueue_struct *wq;
@@ -409,7 +430,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, singlethread_cpu);
+		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
 		if (!p)
 			destroy = 1;
 		else
@@ -417,7 +438,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	} else {
 		list_add(&wq->list, &workqueues);
 		for_each_online_cpu(cpu) {
-			p = create_workqueue_thread(wq, cpu);
+			p = create_workqueue_thread(wq, cpu, freezeable);
 			if (p) {
 				kthread_bind(p, cpu);
 				wake_up_process(p);
@@ -634,7 +655,6 @@ int current_is_keventd(void)
 
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
 /* Take the work from this (downed) CPU. */
 static void take_over_work(struct workqueue_struct *wq, unsigned int cpu)
 {
@@ -667,7 +687,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		mutex_lock(&workqueue_mutex);
 		/* Create a new workqueue thread for it. */
 		list_for_each_entry(wq, &workqueues, list) {
-			if (!create_workqueue_thread(wq, hotcpu)) {
+			if (!create_workqueue_thread(wq, hotcpu, 0)) {
 				printk("workqueue for %i failed\n", hotcpu);
 				return NOTIFY_BAD;
 			}
@@ -717,7 +737,6 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 
 	return NOTIFY_OK;
 }
-#endif
 
 void init_workqueues(void)
 {
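
Taken together, the API change means callers choose freezeability at queue creation time. The matching header update is outside this diff (it is limited to kernel/workqueue.c); in-tree code would normally go through a wrapper macro in <linux/workqueue.h> such as create_freezeable_workqueue() rather than calling __create_workqueue() directly. A minimal sketch of a caller, using the hypothetical name "mydrv" and the raw signature added above:

/* Sketch only -- hypothetical driver, not from the patch. */
#include <linux/init.h>
#include <linux/workqueue.h>

static struct workqueue_struct *mydrv_wq;

static int __init mydrv_init(void)
{
	/* singlethread = 0, freezeable = 1: per-CPU workers are created
	 * without PF_NOFREEZE and call try_to_freeze() in their loop,
	 * so they park cleanly across suspend/resume. */
	mydrv_wq = __create_workqueue("mydrv", 0, 1);
	if (!mydrv_wq)
		return -ENOMEM;
	return 0;
}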
