about summary refs log tree commit diff stats
path: root/kernel/workqueue.c
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2010-07-20 09:59:09 -0400
committerTejun Heo <tj@kernel.org>2010-07-20 09:59:09 -0400
commitf2e005aaff4878a8ea93d5fb033a21389b72579a (patch)
tree13929e2e6010d10ec84dfd43e2cd063002daecca /kernel/workqueue.c
parent931ac77ef65d2d90ee1def63d2041402ec7c53ab (diff)
workqueue: fix mayday_mask handling on UP
All cpumasks are assumed to have cpu 0 permanently set on UP, so it can't be used to signify whether there's something to be done for the CPU. workqueue was using cpumask to track which CPU requested rescuer assistance and this led rescuer thread to think there always are pending mayday requests on UP, which resulted in infinite busy loops. This patch fixes the problem by introducing mayday_mask_t and associated helpers which wrap cpumask on SMP and emulates its behavior using bitops and unsigned long on UP. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--kernel/workqueue.c35
1 files changed, 28 insertions, 7 deletions
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 79a11e40f311..c11edc9c9365 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -186,6 +186,27 @@ struct wq_flusher {
186}; 186};
187 187
188/* 188/*
189 * All cpumasks are assumed to be always set on UP and thus can't be
190 * used to determine whether there's something to be done.
191 */
192#ifdef CONFIG_SMP
193typedef cpumask_var_t mayday_mask_t;
194#define mayday_test_and_set_cpu(cpu, mask) \
195 cpumask_test_and_set_cpu((cpu), (mask))
196#define mayday_clear_cpu(cpu, mask) cpumask_clear_cpu((cpu), (mask))
197#define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
198#define alloc_mayday_mask(maskp, gfp) alloc_cpumask_var((maskp), (gfp))
199#define free_mayday_mask(mask) free_cpumask_var((mask))
200#else
201typedef unsigned long mayday_mask_t;
202#define mayday_test_and_set_cpu(cpu, mask) test_and_set_bit(0, &(mask))
203#define mayday_clear_cpu(cpu, mask) clear_bit(0, &(mask))
204#define for_each_mayday_cpu(cpu, mask) if ((cpu) = 0, (mask))
205#define alloc_mayday_mask(maskp, gfp) true
206#define free_mayday_mask(mask) do { } while (0)
207#endif
208
209/*
189 * The externally visible workqueue abstraction is an array of 210 * The externally visible workqueue abstraction is an array of
190 * per-CPU workqueues: 211 * per-CPU workqueues:
191 */ 212 */
@@ -206,7 +227,7 @@ struct workqueue_struct {
206 struct list_head flusher_queue; /* F: flush waiters */ 227 struct list_head flusher_queue; /* F: flush waiters */
207 struct list_head flusher_overflow; /* F: flush overflow list */ 228 struct list_head flusher_overflow; /* F: flush overflow list */
208 229
209 cpumask_var_t mayday_mask; /* cpus requesting rescue */ 230 mayday_mask_t mayday_mask; /* cpus requesting rescue */
210 struct worker *rescuer; /* I: rescue worker */ 231 struct worker *rescuer; /* I: rescue worker */
211 232
212 int saved_max_active; /* W: saved cwq max_active */ 233 int saved_max_active; /* W: saved cwq max_active */
@@ -1387,7 +1408,7 @@ static bool send_mayday(struct work_struct *work)
1387 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */ 1408 /* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
1388 if (cpu == WORK_CPU_UNBOUND) 1409 if (cpu == WORK_CPU_UNBOUND)
1389 cpu = 0; 1410 cpu = 0;
1390 if (!cpumask_test_and_set_cpu(cpu, wq->mayday_mask)) 1411 if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
1391 wake_up_process(wq->rescuer->task); 1412 wake_up_process(wq->rescuer->task);
1392 return true; 1413 return true;
1393} 1414}
@@ -1915,14 +1936,14 @@ repeat:
1915 * See whether any cpu is asking for help. Unbounded 1936 * See whether any cpu is asking for help. Unbounded
1916 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND. 1937 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
1917 */ 1938 */
1918 for_each_cpu(cpu, wq->mayday_mask) { 1939 for_each_mayday_cpu(cpu, wq->mayday_mask) {
1919 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu; 1940 unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
1920 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq); 1941 struct cpu_workqueue_struct *cwq = get_cwq(tcpu, wq);
1921 struct global_cwq *gcwq = cwq->gcwq; 1942 struct global_cwq *gcwq = cwq->gcwq;
1922 struct work_struct *work, *n; 1943 struct work_struct *work, *n;
1923 1944
1924 __set_current_state(TASK_RUNNING); 1945 __set_current_state(TASK_RUNNING);
1925 cpumask_clear_cpu(cpu, wq->mayday_mask); 1946 mayday_clear_cpu(cpu, wq->mayday_mask);
1926 1947
1927 /* migrate to the target cpu if possible */ 1948 /* migrate to the target cpu if possible */
1928 rescuer->gcwq = gcwq; 1949 rescuer->gcwq = gcwq;
@@ -2724,7 +2745,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
2724 if (flags & WQ_RESCUER) { 2745 if (flags & WQ_RESCUER) {
2725 struct worker *rescuer; 2746 struct worker *rescuer;
2726 2747
2727 if (!alloc_cpumask_var(&wq->mayday_mask, GFP_KERNEL)) 2748 if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
2728 goto err; 2749 goto err;
2729 2750
2730 wq->rescuer = rescuer = alloc_worker(); 2751 wq->rescuer = rescuer = alloc_worker();
@@ -2759,7 +2780,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
2759err: 2780err:
2760 if (wq) { 2781 if (wq) {
2761 free_cwqs(wq); 2782 free_cwqs(wq);
2762 free_cpumask_var(wq->mayday_mask); 2783 free_mayday_mask(wq->mayday_mask);
2763 kfree(wq->rescuer); 2784 kfree(wq->rescuer);
2764 kfree(wq); 2785 kfree(wq);
2765 } 2786 }
@@ -2800,7 +2821,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
2800 2821
2801 if (wq->flags & WQ_RESCUER) { 2822 if (wq->flags & WQ_RESCUER) {
2802 kthread_stop(wq->rescuer->task); 2823 kthread_stop(wq->rescuer->task);
2803 free_cpumask_var(wq->mayday_mask); 2824 free_mayday_mask(wq->mayday_mask);
2804 } 2825 }
2805 2826
2806 free_cwqs(wq); 2827 free_cwqs(wq);