author		Oleg Nesterov <oleg@tv-sign.ru>	2007-05-09 05:34:06 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-05-09 15:30:51 -0400
commit		319c2a986eb45989690c955d9667b814ef0ed56f (patch)
tree		1408756a141247f703b2866886d9707006ab33a9 /kernel
parent		38c3bd96a0a7d375e12f173c34fbebf9f153cb56 (diff)
workqueue: fix freezeable workqueues implementation
Currently ->freezeable is per-cpu, which is wrong: CPU_UP_PREPARE creates
cwq->thread, which is not freezeable. Move ->freezeable to workqueue_struct.

Signed-off-by: Oleg Nesterov <oleg@tv-sign.ru>
Cc: Srivatsa Vaddagiri <vatsa@in.ibm.com>
Cc: "Pallipadi, Venkatesh" <venkatesh.pallipadi@intel.com>
Cc: Gautham shenoy <ego@in.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
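For illustration only, here is a minimal userspace sketch of the problem the patch fixes: before the change, every caller of create_workqueue_thread() had to pass the freezeable flag, and the CPU-hotplug notifier hard-coded 0, so a thread created at CPU_UP_PREPARE time could disagree with the rest of the workqueue; moving the flag into workqueue_struct lets every per-cpu thread read one authoritative value. The struct and function names below are simplified stand-ins, not the kernel API.

/*
 * Simplified sketch, not kernel code: models where the freezeable
 * flag lives before and after the patch.
 */
#include <stdio.h>

struct workqueue;                        /* forward declaration */

struct cpu_workqueue {
	struct workqueue *wq;
	int freezeable;                  /* BEFORE: flag duplicated per cpu */
};

struct workqueue {
	struct cpu_workqueue cwq[2];     /* pretend a two-CPU system */
	int freezeable;                  /* AFTER: one per-workqueue flag */
};

/* BEFORE: the per-cpu flag is whatever the caller happens to pass in. */
static void create_thread_before(struct workqueue *wq, int cpu, int freezeable)
{
	wq->cwq[cpu].wq = wq;
	wq->cwq[cpu].freezeable = freezeable;
}

/* AFTER: the worker reads cwq->wq->freezeable, so it cannot diverge. */
static void create_thread_after(struct workqueue *wq, int cpu)
{
	wq->cwq[cpu].wq = wq;
}

int main(void)
{
	struct workqueue wq = { .freezeable = 1 };

	/* Boot-time CPU: workqueue creation passes the right value... */
	create_thread_before(&wq, 0, 1);
	/* ...but the hotplug path hard-coded 0 for a late-arriving CPU. */
	create_thread_before(&wq, 1, 0);
	printf("before: cpu0=%d cpu1=%d (inconsistent)\n",
	       wq.cwq[0].freezeable, wq.cwq[1].freezeable);

	create_thread_after(&wq, 0);
	create_thread_after(&wq, 1);
	printf("after:  both cpus read wq->freezeable=%d\n",
	       wq.cwq[0].wq->freezeable);
	return 0;
}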
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	18
1 file changed, 9 insertions, 9 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5ecf4984e382..d80dbdceadb8 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -49,8 +49,6 @@ struct cpu_workqueue_struct {
 	struct work_struct *current_work;
 
 	int run_depth;		/* Detect run_workqueue() recursion depth */
-
-	int freezeable;		/* Freeze the thread during suspend */
 } ____cacheline_aligned;
 
 /*
@@ -61,6 +59,7 @@ struct workqueue_struct {
 	struct cpu_workqueue_struct *cpu_wq;
 	const char *name;
 	struct list_head list; 	/* Empty if single thread */
+	int freezeable;		/* Freeze threads during suspend */
 };
 
 /* All the per-cpu workqueues on the system, for hotplug cpu to add/remove
@@ -351,7 +350,7 @@ static int worker_thread(void *__cwq)
 	struct k_sigaction sa;
 	sigset_t blocked;
 
-	if (!cwq->freezeable)
+	if (!cwq->wq->freezeable)
 		current->flags |= PF_NOFREEZE;
 
 	set_user_nice(current, -5);
@@ -375,7 +374,7 @@ static int worker_thread(void *__cwq)
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
-		if (cwq->freezeable)
+		if (cwq->wq->freezeable)
 			try_to_freeze();
 
 		add_wait_queue(&cwq->more_work, &wait);
@@ -546,7 +545,7 @@ out:
 EXPORT_SYMBOL_GPL(flush_work);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-						   int cpu, int freezeable)
+						   int cpu)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 	struct task_struct *p;
@@ -554,7 +553,6 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 	spin_lock_init(&cwq->lock);
 	cwq->wq = wq;
 	cwq->thread = NULL;
-	cwq->freezeable = freezeable;
 	INIT_LIST_HEAD(&cwq->worklist);
 	init_waitqueue_head(&cwq->more_work);
 
@@ -586,10 +584,12 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	}
 
 	wq->name = name;
+	wq->freezeable = freezeable;
+
 	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
+		p = create_workqueue_thread(wq, singlethread_cpu);
 		if (!p)
 			destroy = 1;
 		else
@@ -597,7 +597,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	} else {
 		list_add(&wq->list, &workqueues);
 		for_each_online_cpu(cpu) {
-			p = create_workqueue_thread(wq, cpu, freezeable);
+			p = create_workqueue_thread(wq, cpu);
 			if (p) {
 				kthread_bind(p, cpu);
 				wake_up_process(p);
@@ -854,7 +854,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		mutex_lock(&workqueue_mutex);
 		/* Create a new workqueue thread for it. */
 		list_for_each_entry(wq, &workqueues, list) {
-			if (!create_workqueue_thread(wq, hotcpu, 0)) {
+			if (!create_workqueue_thread(wq, hotcpu)) {
 				printk("workqueue for %i failed\n", hotcpu);
 				return NOTIFY_BAD;
 			}