author     Peter Zijlstra (Intel) <peterz@infradead.org>  2016-03-10 06:54:08 -0500
committer  Thomas Gleixner <tglx@linutronix.de>           2016-05-06 08:58:22 -0400
commit     e9d867a67fd03ccc07248ca4e9c2f74fed494d5b
tree       bd1e01ee5b5581203d3fa8397fd863c400cea2bc /kernel/sched
parent     04974df8049fc4240d22759a91e035082ccd18b4
sched: Allow per-cpu kernel threads to run on online && !active
In order to enable symmetric hotplug, we must mirror the online &&
!active state of cpu-down on the cpu-up side.

However, to retain sanity, limit this state to per-cpu kthreads.

Aside from the change to set_cpus_allowed_ptr(), which allows moving
the per-cpu kthreads on, the other critical piece is the cpu selection
for pinned tasks in select_task_rq(). This avoids dropping into
select_fallback_rq().

select_fallback_rq() cannot be allowed to select !active cpus because
it's used to migrate user tasks away, and we do not want to move user
tasks onto cpus that are in transition.

Requested-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Jan H. Schönherr <jschoenh@amazon.de>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: rt@linutronix.de
Link: http://lkml.kernel.org/r/20160301152303.GV6356@twins.programming.kicks-ass.net
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
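As a rough illustration of the intended use (a minimal sketch, not part of this patch; rebind_percpu_kthread() is a hypothetical helper): during bring-up a strict per-cpu kthread can now be bound to a CPU that is already online but not yet active, because set_cpus_allowed_ptr() validates PF_KTHREAD tasks against cpu_online_mask rather than cpu_active_mask:

#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Hypothetical helper, for illustration only: rebind a strict per-cpu
 * kthread onto a CPU during the online && !active window of cpu-up.
 * With this patch set_cpus_allowed_ptr() accepts the mask because the
 * task has PF_KTHREAD set and the CPU is in cpu_online_mask; a user
 * task asking for the same mask would still get -EINVAL, since it is
 * checked against cpu_active_mask.
 */
static void rebind_percpu_kthread(struct task_struct *kt, int cpu)
{
	WARN_ON_ONCE(set_cpus_allowed_ptr(kt, cpumask_of(cpu)));
}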
Diffstat (limited to 'kernel/sched')
-rw-r--r--   kernel/sched/core.c   49
1 file changed, 42 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8b489fcac37b..8bfd7d4f1c21 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1082,13 +1082,21 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
 static int __set_cpus_allowed_ptr(struct task_struct *p,
 				  const struct cpumask *new_mask, bool check)
 {
+	const struct cpumask *cpu_valid_mask = cpu_active_mask;
+	unsigned int dest_cpu;
 	unsigned long flags;
 	struct rq *rq;
-	unsigned int dest_cpu;
 	int ret = 0;
 
 	rq = task_rq_lock(p, &flags);
 
+	if (p->flags & PF_KTHREAD) {
+		/*
+		 * Kernel threads are allowed on online && !active CPUs
+		 */
+		cpu_valid_mask = cpu_online_mask;
+	}
+
 	/*
 	 * Must re-check here, to close a race against __kthread_bind(),
 	 * sched_setaffinity() is not guaranteed to observe the flag.
@@ -1101,18 +1109,28 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
 	if (cpumask_equal(&p->cpus_allowed, new_mask))
 		goto out;
 
-	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
+	if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
 		ret = -EINVAL;
 		goto out;
 	}
 
 	do_set_cpus_allowed(p, new_mask);
 
+	if (p->flags & PF_KTHREAD) {
+		/*
+		 * For kernel threads that do indeed end up on online &&
+		 * !active we want to ensure they are strict per-cpu threads.
+		 */
+		WARN_ON(cpumask_intersects(new_mask, cpu_online_mask) &&
+			!cpumask_intersects(new_mask, cpu_active_mask) &&
+			p->nr_cpus_allowed != 1);
+	}
+
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
 		goto out;
 
-	dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
+	dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
 	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
@@ -1431,6 +1449,25 @@ EXPORT_SYMBOL_GPL(kick_process);
 
 /*
  * ->cpus_allowed is protected by both rq->lock and p->pi_lock
+ *
+ * A few notes on cpu_active vs cpu_online:
+ *
+ *  - cpu_active must be a subset of cpu_online
+ *
+ *  - on cpu-up we allow per-cpu kthreads on the online && !active cpu,
+ *    see __set_cpus_allowed_ptr(). At this point the newly online
+ *    cpu isn't yet part of the sched domains, and balancing will not
+ *    see it.
+ *
+ *  - on cpu-down we clear cpu_active() to mask the sched domains and
+ *    avoid the load balancer from placing new tasks on the to be
+ *    removed cpu. Existing tasks will remain running there and will
+ *    be taken off.
+ *
+ * This means that fallback selection must not select !active CPUs.
+ * And can assume that any active CPU must be online. Conversely
+ * select_task_rq() below may allow selection of !active CPUs in order
+ * to satisfy the above rules.
  */
 static int select_fallback_rq(int cpu, struct task_struct *p)
 {
@@ -1449,8 +1486,6 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 
 	/* Look for allowed, online CPU in same node. */
 	for_each_cpu(dest_cpu, nodemask) {
-		if (!cpu_online(dest_cpu))
-			continue;
 		if (!cpu_active(dest_cpu))
 			continue;
 		if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p)))
@@ -1461,8 +1496,6 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
 	for (;;) {
 		/* Any allowed, online CPU? */
 		for_each_cpu(dest_cpu, tsk_cpus_allowed(p)) {
-			if (!cpu_online(dest_cpu))
-				continue;
 			if (!cpu_active(dest_cpu))
 				continue;
 			goto out;
@@ -1514,6 +1547,8 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 
 	if (p->nr_cpus_allowed > 1)
 		cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
+	else
+		cpu = cpumask_any(tsk_cpus_allowed(p));
 
 	/*
 	 * In order not to call set_task_cpu() on a blocking task we need
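A brief note on the select_task_rq() hunk above (sketch only; pinned_task_cpu() is a hypothetical name, not part of the patch): for a task pinned to a single CPU the new else branch reduces to picking that one CPU from its allowed mask, so a per-cpu kthread waking during the online && !active window stays put instead of dropping into select_fallback_rq(), which remains restricted to active CPUs for user tasks:

#include <linux/cpumask.h>
#include <linux/sched.h>

/*
 * Illustration only: with p->nr_cpus_allowed == 1 the new else branch
 * is equivalent to returning the task's single allowed CPU, which may
 * be online && !active for per-cpu kthreads.  select_fallback_rq()
 * still filters on cpu_active(), so user tasks are never placed on a
 * CPU that is in transition.
 */
static inline int pinned_task_cpu(struct task_struct *p)
{
	return cpumask_any(tsk_cpus_allowed(p));
}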