about summary refs log tree commit diff stats
path: root/kernel/sched
diff options
context:
space:
mode:
authorKirill Tkhai <ktkhai@parallels.com>2014-09-22 14:36:24 -0400
committerIngo Molnar <mingo@kernel.org>2014-09-24 08:47:09 -0400
commit66339c31bc3978d5fff9c4b4cb590a861def4db2 (patch)
treecaf50db970fc626ee5c6d012fb365442c08d07ee /kernel/sched
parent7a96c231ca23f0f5622852307df4209afc502ec3 (diff)
sched: Use dl_bw_of() under RCU read lock
dl_bw_of() dereferences rq->rd which has to have RCU read lock held. Probability of use-after-free isn't zero here. Also add lockdep assert into dl_bw_cpus(). Signed-off-by: Kirill Tkhai <ktkhai@parallels.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: <stable@vger.kernel.org> # v3.14+ Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Link: http://lkml.kernel.org/r/20140922183624.11015.71558.stgit@localhost Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--kernel/sched/core.c10
1 file changed, 10 insertions, 0 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5b0eac9f4e78..f0adb038170b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2021,6 +2021,8 @@ unsigned long to_ratio(u64 period, u64 runtime)
 #ifdef CONFIG_SMP
 inline struct dl_bw *dl_bw_of(int i)
 {
+	rcu_lockdep_assert(rcu_read_lock_sched_held(),
+			   "sched RCU must be held");
 	return &cpu_rq(i)->rd->dl_bw;
 }
 
@@ -2029,6 +2031,8 @@ static inline int dl_bw_cpus(int i)
 	struct root_domain *rd = cpu_rq(i)->rd;
 	int cpus = 0;
 
+	rcu_lockdep_assert(rcu_read_lock_sched_held(),
+			   "sched RCU must be held");
 	for_each_cpu_and(i, rd->span, cpu_active_mask)
 		cpus++;
 
@@ -7645,6 +7649,8 @@ static int sched_dl_global_constraints(void)
 	int cpu, ret = 0;
 	unsigned long flags;
 
+	rcu_read_lock();
+
 	/*
 	 * Here we want to check the bandwidth not being set to some
 	 * value smaller than the currently allocated bandwidth in
@@ -7666,6 +7672,8 @@ static int sched_dl_global_constraints(void)
 			break;
 	}
 
+	rcu_read_unlock();
+
 	return ret;
 }
 
@@ -7681,6 +7689,7 @@ static void sched_dl_do_global(void)
 	if (global_rt_runtime() != RUNTIME_INF)
 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
 
+	rcu_read_lock();
 	/*
 	 * FIXME: As above...
 	 */
@@ -7691,6 +7700,7 @@ static void sched_dl_do_global(void)
 		dl_b->bw = new_bw;
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 	}
+	rcu_read_unlock();
 }
 
 static int sched_rt_global_validate(void)