path: root/kernel/workqueue.c
author		Tejun Heo <tj@kernel.org>	2013-03-12 14:29:59 -0400
committer	Tejun Heo <tj@kernel.org>	2013-03-12 14:29:59 -0400
commit		493a1724fef9a3e931d9199f1a19e358e526a6e7 (patch)
tree		5cb9ae483904b26b885ae5fb9fc7e7fdca635e71 /kernel/workqueue.c
parent		24b8a84718ed28a51b452881612c267ba3f2b263 (diff)
workqueue: add workqueue_struct->maydays list to replace mayday cpu iterators
Similar to how pool_workqueue iteration used to be, raising and servicing
mayday requests is based on CPU numbers.  It's hairy because cpumask_t may
not be able to handle WORK_CPU_UNBOUND and cpumasks are assumed to be
always set on UP.  This is ugly and can't handle multiple unbound pools
being added for unbound workqueues with custom attributes.

Add workqueue_struct->maydays.  When a pool_workqueue needs rescuing, it
gets chained on the list through pool_workqueue->mayday_node and
rescuer_thread() consumes the list until it's empty.

This patch doesn't introduce any visible behavior changes.

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
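For readers skimming the diff below, here is a minimal user-space sketch of
the list-based mayday scheme the patch switches to: a pool_workqueue chains
itself on wq->maydays via its mayday_node when it needs rescuing, and the
rescuer pops entries until the list is empty.  Only the names
(pool_workqueue, workqueue_struct, maydays, mayday_node, send_mayday) come
from the patch; the list helpers, simplified structs, omitted locking, and
the main() driver are illustrative stand-ins, not the kernel implementation.

/* user-space sketch only -- not kernel code */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }
static int list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pool_workqueue {
	const char *name;
	struct list_head mayday_node;	/* node on wq->maydays */
};

struct workqueue_struct {
	struct list_head maydays;	/* pwqs requesting rescue */
};

/* like send_mayday(): queue the pwq at most once, however often it cries */
static void send_mayday(struct workqueue_struct *wq, struct pool_workqueue *pwq)
{
	if (list_empty(&pwq->mayday_node))
		list_add_tail(&pwq->mayday_node, &wq->maydays);
}

/* like the rescuer loop: consume wq->maydays until it is empty */
static void rescuer(struct workqueue_struct *wq)
{
	while (!list_empty(&wq->maydays)) {
		struct pool_workqueue *pwq =
			container_of(wq->maydays.next,
				     struct pool_workqueue, mayday_node);

		list_del_init(&pwq->mayday_node);
		printf("rescuing %s\n", pwq->name);
	}
}

int main(void)
{
	struct workqueue_struct wq;
	struct pool_workqueue a = { "pwq-a" }, b = { "pwq-b" };

	INIT_LIST_HEAD(&wq.maydays);
	INIT_LIST_HEAD(&a.mayday_node);
	INIT_LIST_HEAD(&b.mayday_node);

	send_mayday(&wq, &a);
	send_mayday(&wq, &a);	/* duplicate request is a no-op */
	send_mayday(&wq, &b);
	rescuer(&wq);		/* prints pwq-a, then pwq-b */
	return 0;
}

Note that list_del_init() (rather than a plain delete) leaves the node
self-pointing, so the list_empty() test in send_mayday() re-arms correctly
the next time the same pwq asks for help.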
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--	kernel/workqueue.c	| 77
1 file changed, 28 insertions(+), 49 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 8942cc74d83b..26c67c76b6c5 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -170,6 +170,7 @@ struct pool_workqueue {
 	int			max_active;	/* L: max active works */
 	struct list_head	delayed_works;	/* L: delayed works */
 	struct list_head	pwqs_node;	/* I: node on wq->pwqs */
+	struct list_head	mayday_node;	/* W: node on wq->maydays */
 } __aligned(1 << WORK_STRUCT_FLAG_BITS);
 
 /*
@@ -182,27 +183,6 @@ struct wq_flusher {
 };
 
 /*
- * All cpumasks are assumed to be always set on UP and thus can't be
- * used to determine whether there's something to be done.
- */
-#ifdef CONFIG_SMP
-typedef cpumask_var_t mayday_mask_t;
-#define mayday_test_and_set_cpu(cpu, mask)	\
-	cpumask_test_and_set_cpu((cpu), (mask))
-#define mayday_clear_cpu(cpu, mask)	cpumask_clear_cpu((cpu), (mask))
-#define for_each_mayday_cpu(cpu, mask)	for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)	zalloc_cpumask_var((maskp), (gfp))
-#define free_mayday_mask(mask)		free_cpumask_var((mask))
-#else
-typedef unsigned long mayday_mask_t;
-#define mayday_test_and_set_cpu(cpu, mask)	test_and_set_bit(0, &(mask))
-#define mayday_clear_cpu(cpu, mask)	clear_bit(0, &(mask))
-#define for_each_mayday_cpu(cpu, mask)	if ((cpu) = 0, (mask))
-#define alloc_mayday_mask(maskp, gfp)	true
-#define free_mayday_mask(mask)		do { } while (0)
-#endif
-
-/*
  * The externally visible workqueue abstraction is an array of
  * per-CPU workqueues:
  */
@@ -224,7 +204,7 @@ struct workqueue_struct {
 	struct list_head	flusher_queue;	/* F: flush waiters */
 	struct list_head	flusher_overflow; /* F: flush overflow list */
 
-	mayday_mask_t		mayday_mask;	/* cpus requesting rescue */
+	struct list_head	maydays;	/* W: pwqs requesting rescue */
 	struct worker		*rescuer;	/* I: rescue worker */
 
 	int			nr_drainers;	/* W: drain in progress */
@@ -1850,23 +1830,21 @@ static void idle_worker_timeout(unsigned long __pool)
 	spin_unlock_irq(&pool->lock);
 }
 
-static bool send_mayday(struct work_struct *work)
+static void send_mayday(struct work_struct *work)
 {
 	struct pool_workqueue *pwq = get_work_pwq(work);
 	struct workqueue_struct *wq = pwq->wq;
-	unsigned int cpu;
+
+	lockdep_assert_held(&workqueue_lock);
 
 	if (!(wq->flags & WQ_RESCUER))
-		return false;
+		return;
 
 	/* mayday mayday mayday */
-	cpu = pwq->pool->cpu;
-	/* WORK_CPU_UNBOUND can't be set in cpumask, use cpu 0 instead */
-	if (cpu == WORK_CPU_UNBOUND)
-		cpu = 0;
-	if (!mayday_test_and_set_cpu(cpu, wq->mayday_mask))
+	if (list_empty(&pwq->mayday_node)) {
+		list_add_tail(&pwq->mayday_node, &wq->maydays);
 		wake_up_process(wq->rescuer->task);
-	return true;
+	}
 }
 
 static void pool_mayday_timeout(unsigned long __pool)
@@ -1874,7 +1852,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 	struct worker_pool *pool = (void *)__pool;
 	struct work_struct *work;
 
-	spin_lock_irq(&pool->lock);
+	spin_lock_irq(&workqueue_lock);		/* for wq->maydays */
+	spin_lock(&pool->lock);
 
 	if (need_to_create_worker(pool)) {
 		/*
@@ -1887,7 +1866,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 			send_mayday(work);
 	}
 
-	spin_unlock_irq(&pool->lock);
+	spin_unlock(&pool->lock);
+	spin_unlock_irq(&workqueue_lock);
 
 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
@@ -2336,8 +2316,6 @@ static int rescuer_thread(void *__rescuer)
 	struct worker *rescuer = __rescuer;
 	struct workqueue_struct *wq = rescuer->rescue_wq;
 	struct list_head *scheduled = &rescuer->scheduled;
-	bool is_unbound = wq->flags & WQ_UNBOUND;
-	unsigned int cpu;
 
 	set_user_nice(current, RESCUER_NICE_LEVEL);
 
@@ -2355,18 +2333,19 @@ repeat:
 		return 0;
 	}
 
-	/*
-	 * See whether any cpu is asking for help.  Unbounded
-	 * workqueues use cpu 0 in mayday_mask for CPU_UNBOUND.
-	 */
-	for_each_mayday_cpu(cpu, wq->mayday_mask) {
-		unsigned int tcpu = is_unbound ? WORK_CPU_UNBOUND : cpu;
-		struct pool_workqueue *pwq = get_pwq(tcpu, wq);
+	/* see whether any pwq is asking for help */
+	spin_lock_irq(&workqueue_lock);
+
+	while (!list_empty(&wq->maydays)) {
+		struct pool_workqueue *pwq = list_first_entry(&wq->maydays,
+					struct pool_workqueue, mayday_node);
 		struct worker_pool *pool = pwq->pool;
 		struct work_struct *work, *n;
 
 		__set_current_state(TASK_RUNNING);
-		mayday_clear_cpu(cpu, wq->mayday_mask);
+		list_del_init(&pwq->mayday_node);
+
+		spin_unlock_irq(&workqueue_lock);
 
 		/* migrate to the target cpu if possible */
 		worker_maybe_bind_and_lock(pool);
@@ -2392,9 +2371,12 @@ repeat:
 			wake_up_worker(pool);
 
 		rescuer->pool = NULL;
-		spin_unlock_irq(&pool->lock);
+		spin_unlock(&pool->lock);
+		spin_lock(&workqueue_lock);
 	}
 
+	spin_unlock_irq(&workqueue_lock);
+
 	/* rescuers should never participate in concurrency management */
 	WARN_ON_ONCE(!(rescuer->flags & WORKER_NOT_RUNNING));
 	schedule();
@@ -3192,6 +3174,7 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 	INIT_LIST_HEAD(&wq->pwqs);
 	INIT_LIST_HEAD(&wq->flusher_queue);
 	INIT_LIST_HEAD(&wq->flusher_overflow);
+	INIT_LIST_HEAD(&wq->maydays);
 
 	lockdep_init_map(&wq->lockdep_map, lock_name, key, 0);
 	INIT_LIST_HEAD(&wq->list);
@@ -3205,14 +3188,12 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 		pwq->flush_color = -1;
 		pwq->max_active = max_active;
 		INIT_LIST_HEAD(&pwq->delayed_works);
+		INIT_LIST_HEAD(&pwq->mayday_node);
 	}
 
 	if (flags & WQ_RESCUER) {
 		struct worker *rescuer;
 
-		if (!alloc_mayday_mask(&wq->mayday_mask, GFP_KERNEL))
-			goto err;
-
 		wq->rescuer = rescuer = alloc_worker();
 		if (!rescuer)
 			goto err;
@@ -3246,7 +3227,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *fmt,
 err:
 	if (wq) {
 		free_pwqs(wq);
-		free_mayday_mask(wq->mayday_mask);
 		kfree(wq->rescuer);
 		kfree(wq);
 	}
@@ -3289,7 +3269,6 @@ void destroy_workqueue(struct workqueue_struct *wq)
 
 	if (wq->flags & WQ_RESCUER) {
 		kthread_stop(wq->rescuer->task);
-		free_mayday_mask(wq->mayday_mask);
 		kfree(wq->rescuer);
 	}
 