author     Juri Lelli <juri.lelli@arm.com>    2015-03-31 04:53:37 -0400
committer  Ingo Molnar <mingo@kernel.org>     2015-04-02 11:42:56 -0400
commit     3c18d447b3b36a8d3c90dc37dfbd363cdb685d0a
tree       ea9dd78ff9cebdd03d7f8079b2c01e5331586054 /kernel/sched/core.c
parent     4cd57f97135840f637431c92380c8da3edbe44ed
sched/core: Check for available DL bandwidth in cpuset_cpu_inactive()
Hotplug operations are destructive w.r.t. cpusets. If such an operation is
performed on a CPU belonging to an exclusive cpuset, the DL bandwidth
information associated with the corresponding root domain is gone even if
the operation fails (in sched_cpu_inactive()).

For this reason we need to move the check we currently have in
sched_cpu_inactive() to cpuset_cpu_inactive(), to prevent a useless cpuset
reconfiguration in the CPU_DOWN_FAILED path.
Signed-off-by: Juri Lelli <juri.lelli@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Juri Lelli <juri.lelli@gmail.com>
Link: http://lkml.kernel.org/r/1427792017-7356-2-git-send-email-juri.lelli@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
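
For orientation, the admission test the moved check performs is the
__dl_overflow() predicate called from the new cpuset_cpu_inactive() hunk
below. The following is a sketch of that predicate and the dl_bw fields it
reads, as they look around v4.0 in kernel/sched/sched.h, reproduced here
from memory; the tree at this commit is authoritative:

/*
 * Sketch of the admission test used by the check this patch moves.
 * Mirrors __dl_overflow() and the relevant struct dl_bw fields of this
 * era; consult kernel/sched/sched.h at this commit for the real thing.
 */
struct dl_bw {
	raw_spinlock_t lock;
	u64 bw;		/* max DL bandwidth per CPU; (u64)-1 means "no limit" */
	u64 total_bw;	/* DL bandwidth already allocated in this root domain */
};

static inline
bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
{
	/*
	 * The hotplug path calls this with old_bw == new_bw == 0, so the
	 * question reduces to: does the bandwidth already granted
	 * (total_bw) still fit on the 'cpus' that remain once the dying
	 * CPU drops out of the root domain?
	 */
	return dl_b->bw != -1 &&
	       dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
}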
Diffstat (limited to 'kernel/sched/core.c')
-rw-r--r--  kernel/sched/core.c  56
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4c49e75ca24d..28b0d75a8273 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5337,36 +5337,13 @@ static int sched_cpu_active(struct notifier_block *nfb,
 static int sched_cpu_inactive(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
-	unsigned long flags;
-	long cpu = (long)hcpu;
-	struct dl_bw *dl_b;
-
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
-		set_cpu_active(cpu, false);
-
-		/* explicitly allow suspend */
-		if (!(action & CPU_TASKS_FROZEN)) {
-			bool overflow;
-			int cpus;
-
-			rcu_read_lock_sched();
-			dl_b = dl_bw_of(cpu);
-
-			raw_spin_lock_irqsave(&dl_b->lock, flags);
-			cpus = dl_bw_cpus(cpu);
-			overflow = __dl_overflow(dl_b, cpus, 0, 0);
-			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
-
-			rcu_read_unlock_sched();
-
-			if (overflow)
-				return notifier_from_errno(-EBUSY);
-		}
+		set_cpu_active((long)hcpu, false);
 		return NOTIFY_OK;
+	default:
+		return NOTIFY_DONE;
 	}
-
-	return NOTIFY_DONE;
 }
 
 static int __init migration_init(void)
@@ -7006,7 +6983,6 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 	 */
 
 	case CPU_ONLINE:
-	case CPU_DOWN_FAILED:
 		cpuset_update_active_cpus(true);
 		break;
 	default:
@@ -7018,8 +6994,32 @@ static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
 static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
 			       void *hcpu)
 {
-	switch (action) {
+	unsigned long flags;
+	long cpu = (long)hcpu;
+	struct dl_bw *dl_b;
+
+	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
+		/* explicitly allow suspend */
+		if (!(action & CPU_TASKS_FROZEN)) {
+			bool overflow;
+			int cpus;
+
+			rcu_read_lock_sched();
+			dl_b = dl_bw_of(cpu);
+
+			raw_spin_lock_irqsave(&dl_b->lock, flags);
+			cpus = dl_bw_cpus(cpu);
+			overflow = __dl_overflow(dl_b, cpus, 0, 0);
+			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+			rcu_read_unlock_sched();
+
+			if (overflow) {
+				trace_printk("hotplug failed for cpu %lu", cpu);
+				return notifier_from_errno(-EBUSY);
+			}
+		}
 		cpuset_update_active_cpus(false);
 		break;
 	case CPU_DOWN_PREPARE_FROZEN:
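
To see why returning notifier_from_errno(-EBUSY) from CPU_DOWN_PREPARE is
enough to abort the offline before the root domain is torn down, here is a
minimal sketch of a hotplug notifier in the pre-4.10 notifier style this
code uses. The would_lose_needed_bandwidth() helper is hypothetical,
standing in for the __dl_overflow() test above; register_cpu_notifier() is
the real registration API of this era:

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/notifier.h>

/* Hypothetical stand-in for the __dl_overflow()-based admission test. */
static bool would_lose_needed_bandwidth(long cpu)
{
	return false;	/* always admit, for illustration */
}

static int demo_cpu_notify(struct notifier_block *nfb,
			   unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_DOWN_PREPARE:
		/*
		 * A non-OK return here makes _cpu_down() bail out and
		 * replay CPU_DOWN_FAILED on the notifiers already run,
		 * so the CPU never actually goes offline. With the check
		 * in cpuset_cpu_inactive(), the veto fires before any
		 * destructive cpuset/root-domain reconfiguration.
		 */
		if (would_lose_needed_bandwidth(cpu))
			return notifier_from_errno(-EBUSY);
		return NOTIFY_OK;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block demo_cpu_nb = {
	.notifier_call = demo_cpu_notify,
};

static int __init demo_init(void)
{
	register_cpu_notifier(&demo_cpu_nb);
	return 0;
}
early_initcall(demo_init);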