Diffstat (limited to 'kernel/workqueue.c')
 kernel/workqueue.c | 14 +++++++++++---
 1 file changed, 11 insertions(+), 3 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 5ca7ce9ce754..e3378e8d3a5c 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1291,8 +1291,14 @@ __acquires(&gcwq->lock)
 			return true;
 		spin_unlock_irq(&gcwq->lock);
 
-		/* CPU has come up inbetween, retry migration */
+		/*
+		 * We've raced with CPU hot[un]plug.  Give it a breather
+		 * and retry migration.  cond_resched() is required here;
+		 * otherwise, we might deadlock against cpu_stop trying to
+		 * bring down the CPU on non-preemptive kernel.
+		 */
 		cpu_relax();
+		cond_resched();
 	}
 }
 
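Aside on this hunk: on a non-preemptive (CONFIG_PREEMPT_NONE) kernel, cpu_relax() is only a CPU pipeline hint and never enters the scheduler, so a tight retry loop can starve the cpu_stop thread that CPU hotunplug waits on. Below is a minimal sketch of the resulting pattern, not the real workqueue code; try_bind() and the retry count are hypothetical stand-ins for the migration attempt.

/* Hedged sketch of the retry pattern above; try_bind() is a
 * hypothetical stand-in for the real migration/rebind attempt.
 */
#include <linux/sched.h>	/* cond_resched() */
#include <asm/processor.h>	/* cpu_relax() */

static int tries;

static bool try_bind(void)
{
	return ++tries > 3;	/* stand-in: pretend migration eventually succeeds */
}

static void bind_retry(void)
{
	while (!try_bind()) {
		cpu_relax();	/* CPU hint only; never yields the processor */
		cond_resched();	/* scheduling point so cpu_stop can make progress */
	}
}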
@@ -1366,8 +1372,10 @@ static struct worker *create_worker(struct global_cwq *gcwq, bool bind)
 	worker->id = id;
 
 	if (!on_unbound_cpu)
-		worker->task = kthread_create(worker_thread, worker,
-					      "kworker/%u:%d", gcwq->cpu, id);
+		worker->task = kthread_create_on_node(worker_thread,
+						      worker,
+						      cpu_to_node(gcwq->cpu),
+						      "kworker/%u:%d", gcwq->cpu, id);
 	else
 		worker->task = kthread_create(worker_thread, worker,
 					      "kworker/u:%d", id);