author	Rafael J. Wysocki <rjw@sisk.pl>	2006-12-06 23:34:49 -0500
committer	Linus Torvalds <torvalds@woody.osdl.org>	2006-12-07 11:39:29 -0500
commit	341a595850dac1b0503df34260257d71b4fdf72c (patch)
tree	478bba299639ddebed62c6d9feb3c54504726e9b /kernel
parent	5045cfc103566878228ca36d05a0ae0076673e5a (diff)
[PATCH] Support for freezeable workqueues
Make it possible to create a workqueue whose worker thread will be frozen during suspend, along with other kernel threads.

Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Acked-by: Pavel Machek <pavel@ucw.cz>
Cc: Nigel Cunningham <nigel@suspend2.net>
Cc: David Chinner <dgc@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/workqueue.c	20
1 file changed, 14 insertions(+), 6 deletions(-)
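For context, a minimal sketch of how a caller could request a freezeable workqueue once this patch is applied. It calls __create_workqueue() directly with the new third argument shown in the diff below; the wrapper macros live in include/linux/workqueue.h and are outside this kernel/-only diffstat, and the driver name and functions here ("mydrv", mydrv_init) are made up for illustration.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/workqueue.h>

/* Hypothetical driver init: "mydrv" and mydrv_wq are illustrative names. */
static struct workqueue_struct *mydrv_wq;

static int __init mydrv_init(void)
{
	/* name, singlethread = 0, freezeable = 1 (the new argument) */
	mydrv_wq = __create_workqueue("mydrv", 0, 1);
	if (!mydrv_wq)
		return -ENOMEM;
	return 0;
}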
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8d1e7cb8a51a..2945b094d871 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -29,6 +29,7 @@
 #include <linux/kthread.h>
 #include <linux/hardirq.h>
 #include <linux/mempolicy.h>
+#include <linux/freezer.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -55,6 +56,8 @@ struct cpu_workqueue_struct {
 	struct task_struct *thread;
 
 	int run_depth;		/* Detect run_workqueue() recursion depth */
+
+	int freezeable;		/* Freeze the thread during suspend */
 } ____cacheline_aligned;
 
 /*
@@ -265,7 +268,8 @@ static int worker_thread(void *__cwq)
 	struct k_sigaction sa;
 	sigset_t blocked;
 
-	current->flags |= PF_NOFREEZE;
+	if (!cwq->freezeable)
+		current->flags |= PF_NOFREEZE;
 
 	set_user_nice(current, -5);
 
@@ -288,6 +292,9 @@ static int worker_thread(void *__cwq)
 
 	set_current_state(TASK_INTERRUPTIBLE);
 	while (!kthread_should_stop()) {
+		if (cwq->freezeable)
+			try_to_freeze();
+
 		add_wait_queue(&cwq->more_work, &wait);
 		if (list_empty(&cwq->worklist))
 			schedule();
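Taken together with the previous hunk, a condensed view of the worker loop after this patch (a sketch, not the full worker_thread() body): a freezeable worker no longer sets PF_NOFREEZE and instead parks in the freezer via try_to_freeze() at the top of each iteration.

	if (!cwq->freezeable)
		current->flags |= PF_NOFREEZE;
	/* ... signal setup, set_user_nice() ... */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		if (cwq->freezeable)
			try_to_freeze();

		add_wait_queue(&cwq->more_work, &wait);
		if (list_empty(&cwq->worklist))
			schedule();
		/* ... run the pending work and loop ... */
	}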
@@ -364,7 +371,7 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
 EXPORT_SYMBOL_GPL(flush_workqueue);
 
 static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
-						   int cpu)
+						   int cpu, int freezeable)
 {
 	struct cpu_workqueue_struct *cwq = per_cpu_ptr(wq->cpu_wq, cpu);
 	struct task_struct *p;
@@ -374,6 +381,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 	cwq->thread = NULL;
 	cwq->insert_sequence = 0;
 	cwq->remove_sequence = 0;
+	cwq->freezeable = freezeable;
 	INIT_LIST_HEAD(&cwq->worklist);
 	init_waitqueue_head(&cwq->more_work);
 	init_waitqueue_head(&cwq->work_done);
@@ -389,7 +397,7 @@ static struct task_struct *create_workqueue_thread(struct workqueue_struct *wq,
 }
 
 struct workqueue_struct *__create_workqueue(const char *name,
-					    int singlethread)
+					    int singlethread, int freezeable)
 {
 	int cpu, destroy = 0;
 	struct workqueue_struct *wq;
@@ -409,7 +417,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	mutex_lock(&workqueue_mutex);
 	if (singlethread) {
 		INIT_LIST_HEAD(&wq->list);
-		p = create_workqueue_thread(wq, singlethread_cpu);
+		p = create_workqueue_thread(wq, singlethread_cpu, freezeable);
 		if (!p)
 			destroy = 1;
 		else
@@ -417,7 +425,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
 	} else {
 		list_add(&wq->list, &workqueues);
 		for_each_online_cpu(cpu) {
-			p = create_workqueue_thread(wq, cpu);
+			p = create_workqueue_thread(wq, cpu, freezeable);
 			if (p) {
 				kthread_bind(p, cpu);
 				wake_up_process(p);
@@ -667,7 +675,7 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
 		mutex_lock(&workqueue_mutex);
 		/* Create a new workqueue thread for it. */
 		list_for_each_entry(wq, &workqueues, list) {
-			if (!create_workqueue_thread(wq, hotcpu, 0)) {
+			if (!create_workqueue_thread(wq, hotcpu, 0)) {
 				printk("workqueue for %i failed\n", hotcpu);
 				return NOTIFY_BAD;
 			}
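One detail worth noting from the last hunk: when a CPU comes online, the hotplug callback re-creates per-CPU worker threads with the freezeable argument hard-coded to 0, so threads created along this path are not freezeable in this version of the patch.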