author     Dmitry Torokhov <dmitry.torokhov@gmail.com>  2015-02-10 14:35:36 -0500
committer  Dmitry Torokhov <dmitry.torokhov@gmail.com>  2015-02-10 14:35:36 -0500
commit     4ba24fef3eb3b142197135223b90ced2f319cd53 (patch)
tree       a20c125b27740ec7b4c761b11d801108e1b316b2 /kernel/workqueue.c
parent     47c1ffb2b6b630894e9a16442611c056ab21c057 (diff)
parent     98a4a59ee31a12105a2b84f5b8b515ac2cb208ef (diff)
Merge branch 'next' into for-linus
Prepare first round of input updates for 3.20.
Diffstat (limited to 'kernel/workqueue.c')
-rw-r--r--  kernel/workqueue.c  35
1 file changed, 27 insertions(+), 8 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5dbe22aa3efd..6202b08f1933 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1804,8 +1804,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 	struct worker_pool *pool = (void *)__pool;
 	struct work_struct *work;
 
-	spin_lock_irq(&wq_mayday_lock);		/* for wq->maydays */
-	spin_lock(&pool->lock);
+	spin_lock_irq(&pool->lock);
+	spin_lock(&wq_mayday_lock);		/* for wq->maydays */
 
 	if (need_to_create_worker(pool)) {
 		/*
@@ -1818,8 +1818,8 @@ static void pool_mayday_timeout(unsigned long __pool)
 			send_mayday(work);
 	}
 
-	spin_unlock(&pool->lock);
-	spin_unlock_irq(&wq_mayday_lock);
+	spin_unlock(&wq_mayday_lock);
+	spin_unlock_irq(&pool->lock);
 
 	mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
 }
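
Taken together, the two hunks above invert the lock nesting in pool_mayday_timeout(): pool->lock becomes the outer lock (taken with IRQs disabled) and wq_mayday_lock nests inside it. A minimal sketch of the resulting acquire/release order, with the mayday-sending body elided (simplified, not the full upstream function):

	static void pool_mayday_timeout(unsigned long __pool)
	{
		struct worker_pool *pool = (void *)__pool;

		/* pool->lock is now the outer lock, taken with IRQs off */
		spin_lock_irq(&pool->lock);
		/* wq_mayday_lock nests inside it, protecting wq->maydays */
		spin_lock(&wq_mayday_lock);

		/* ... send_mayday() for work items still lacking a worker ... */

		spin_unlock(&wq_mayday_lock);
		spin_unlock_irq(&pool->lock);

		/* re-arm the timer so maydays keep going out under pressure */
		mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
	}
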
@@ -2043,9 +2043,10 @@ __acquires(&pool->lock)
 	 * kernels, where a requeueing work item waiting for something to
 	 * happen could deadlock with stop_machine as such work item could
 	 * indefinitely requeue itself while all other CPUs are trapped in
-	 * stop_machine.
+	 * stop_machine. At the same time, report a quiescent RCU state so
+	 * the same condition doesn't freeze RCU.
 	 */
-	cond_resched();
+	cond_resched_rcu_qs();
 
 	spin_lock_irq(&pool->lock);
 
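
For context, the scenario the comment above describes involves a work item that immediately requeues itself. A purely hypothetical example (not part of this patch) is sketched below; a long back-to-back run of such items on a !PREEMPT kernel is why the worker loop uses cond_resched_rcu_qs(), which both offers to reschedule and reports a quiescent RCU state.

	#include <linux/workqueue.h>

	/* Hypothetical self-requeueing work item (illustration only).
	 * Each execution queues itself again, so a worker can end up
	 * running it back to back for a long time. */
	static void selfrequeue_fn(struct work_struct *work)
	{
		/* ... do one small unit of work ... */

		schedule_work(work);	/* requeue immediately */
	}

	static DECLARE_WORK(selfrequeue_work, selfrequeue_fn);
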
@@ -2247,12 +2248,30 @@ repeat:
 		 * Slurp in all works issued via this workqueue and
 		 * process'em.
 		 */
-		WARN_ON_ONCE(!list_empty(&rescuer->scheduled));
+		WARN_ON_ONCE(!list_empty(scheduled));
 		list_for_each_entry_safe(work, n, &pool->worklist, entry)
 			if (get_work_pwq(work) == pwq)
 				move_linked_works(work, scheduled, &n);
 
-		process_scheduled_works(rescuer);
+		if (!list_empty(scheduled)) {
+			process_scheduled_works(rescuer);
+
+			/*
+			 * The above execution of rescued work items could
+			 * have created more to rescue through
+			 * pwq_activate_first_delayed() or chained
+			 * queueing.  Let's put @pwq back on mayday list so
+			 * that such back-to-back work items, which may be
+			 * being used to relieve memory pressure, don't
+			 * incur MAYDAY_INTERVAL delay inbetween.
+			 */
+			if (need_to_create_worker(pool)) {
+				spin_lock(&wq_mayday_lock);
+				get_pwq(pwq);
+				list_move_tail(&pwq->mayday_node, &wq->maydays);
+				spin_unlock(&wq_mayday_lock);
+			}
+		}
 
 		/*
 		 * Put the reference grabbed by send_mayday().  @pool won't