aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/sched/core.c
diff options
context:
space:
mode:
authorJuri Lelli <juri.lelli@arm.com>2014-10-07 04:52:11 -0400
committerIngo Molnar <mingo@kernel.org>2014-10-28 05:48:00 -0400
commitf82f80426f7afcf55953924e71555984a4bd6ce6 (patch)
tree3a3e15c85f04c11d8f9bc0708b4c8ca439d3431b /kernel/sched/core.c
parent7f51412a415d87ea8598d14722fb31e4f5701257 (diff)
sched/deadline: Ensure that updates to exclusive cpusets don't break AC
How we deal with updates to exclusive cpusets is currently broken. As an example, suppose we have an exclusive cpuset composed of two cpus: A[cpu0,cpu1]. We can assign SCHED_DEADLINE tasks to it up to the allowed bandwidth. If we now want to modify cpusetA's cpumask, we have to check that removing a cpu's amount of bandwidth doesn't break AC guarantees. The current code does not perform this check. This patch fixes the problem above, denying an update if the new cpumask won't have enough bandwidth for the SCHED_DEADLINE tasks that are currently active. Signed-off-by: Juri Lelli <juri.lelli@arm.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Li Zefan <lizefan@huawei.com> Cc: cgroups@vger.kernel.org Link: http://lkml.kernel.org/r/5433E6AF.5080105@arm.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--kernel/sched/core.c19
1 files changed, 19 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 9993feeb8b10..0456a55fc27f 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4650,6 +4650,25 @@ void init_idle(struct task_struct *idle, int cpu)
4650#endif 4650#endif
4651} 4651}
4652 4652
4653int cpuset_cpumask_can_shrink(const struct cpumask *cur,
4654 const struct cpumask *trial)
4655{
4656 int ret = 1, trial_cpus;
4657 struct dl_bw *cur_dl_b;
4658 unsigned long flags;
4659
4660 cur_dl_b = dl_bw_of(cpumask_any(cur));
4661 trial_cpus = cpumask_weight(trial);
4662
4663 raw_spin_lock_irqsave(&cur_dl_b->lock, flags);
4664 if (cur_dl_b->bw != -1 &&
4665 cur_dl_b->bw * trial_cpus < cur_dl_b->total_bw)
4666 ret = 0;
4667 raw_spin_unlock_irqrestore(&cur_dl_b->lock, flags);
4668
4669 return ret;
4670}
4671
4653int task_can_attach(struct task_struct *p, 4672int task_can_attach(struct task_struct *p,
4654 const struct cpumask *cs_cpus_allowed) 4673 const struct cpumask *cs_cpus_allowed)
4655{ 4674{