author	Kirill Tkhai <ktkhai@parallels.com>	2014-09-30 04:23:37 -0400
committer	Ingo Molnar <mingo@kernel.org>	2014-10-02 23:46:58 -0400
commit	f10e00f4bf360c36edbe6bf18a6c75b171cbe012 (patch)
tree	0733366372b97784b82acafa6d0fc0a57f62e27b /kernel
parent	10a12983b3d437a6998b3845870e52c1c752c101 (diff)
sched/dl: Use dl_bw_of() under rcu_read_lock_sched()
rq->rd is freed using call_rcu_sched(), so rcu_read_lock() to access it
is not enough. We should use either rcu_read_lock_sched() or
preempt_disable().

Reported-by: Sasha Levin <sasha.levin@oracle.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Kirill Tkhai <ktkhai@parallels.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Fixes: 66339c31bc39 ("sched: Use dl_bw_of() under RCU read lock")
Link: http://lkml.kernel.org/r/1412065417.20287.24.camel@tkhai
Signed-off-by: Ingo Molnar <mingo@kernel.org>
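The rule the patch enforces: an object reclaimed via call_rcu_sched() is only
protected by sched-RCU read-side critical sections, so readers must bracket the
access with rcu_read_lock_sched()/rcu_read_unlock_sched() (or preempt_disable()),
not plain rcu_read_lock(). A minimal sketch of that pairing, with made-up names
(struct foo, foo_ptr, foo_retire, foo_read) rather than the scheduler's actual
root_domain code:

/*
 * Illustrative only: the names below are hypothetical, not scheduler code.
 */
#include <linux/kernel.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct foo {
	int val;
	struct rcu_head rcu;
};

static struct foo __rcu *foo_ptr;

static void foo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct foo, rcu));
}

/* Updater: reclamation waits for a sched-RCU grace period. */
static void foo_retire(struct foo *old)
{
	call_rcu_sched(&old->rcu, foo_free_rcu);
}

/*
 * Reader: must match the sched flavour; plain rcu_read_lock() would not
 * delay the call_rcu_sched() callback above.
 */
static int foo_read(void)
{
	struct foo *p;
	int val = -1;

	rcu_read_lock_sched();
	p = rcu_dereference_sched(foo_ptr);
	if (p)
		val = p->val;
	rcu_read_unlock_sched();

	return val;
}

Wrapping the access in preempt_disable()/preempt_enable() would work equally
well, since sched-RCU grace periods wait for all preempt-disabled regions.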
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sched/core.c	25
1 file changed, 16 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b5349fee1213..c84bdc098656 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5264,6 +5264,7 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
 {
 	unsigned long flags;
 	long cpu = (long)hcpu;
+	struct dl_bw *dl_b;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_DOWN_PREPARE:
@@ -5271,15 +5272,19 @@ static int sched_cpu_inactive(struct notifier_block *nfb,
 
 		/* explicitly allow suspend */
 		if (!(action & CPU_TASKS_FROZEN)) {
-			struct dl_bw *dl_b = dl_bw_of(cpu);
 			bool overflow;
 			int cpus;
 
+			rcu_read_lock_sched();
+			dl_b = dl_bw_of(cpu);
+
 			raw_spin_lock_irqsave(&dl_b->lock, flags);
 			cpus = dl_bw_cpus(cpu);
 			overflow = __dl_overflow(dl_b, cpus, 0, 0);
 			raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
+			rcu_read_unlock_sched();
+
 			if (overflow)
 				return notifier_from_errno(-EBUSY);
 		}
@@ -7647,11 +7652,10 @@ static int sched_dl_global_constraints(void)
 	u64 runtime = global_rt_runtime();
 	u64 period = global_rt_period();
 	u64 new_bw = to_ratio(period, runtime);
+	struct dl_bw *dl_b;
 	int cpu, ret = 0;
 	unsigned long flags;
 
-	rcu_read_lock();
-
 	/*
 	 * Here we want to check the bandwidth not being set to some
 	 * value smaller than the currently allocated bandwidth in
@@ -7662,25 +7666,27 @@ static int sched_dl_global_constraints(void)
 	 * solutions is welcome!
 	 */
 	for_each_possible_cpu(cpu) {
-		struct dl_bw *dl_b = dl_bw_of(cpu);
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(cpu);
 
 		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		if (new_bw < dl_b->total_bw)
 			ret = -EBUSY;
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
+		rcu_read_unlock_sched();
+
 		if (ret)
 			break;
 	}
 
-	rcu_read_unlock();
-
 	return ret;
 }
 
 static void sched_dl_do_global(void)
 {
 	u64 new_bw = -1;
+	struct dl_bw *dl_b;
 	int cpu;
 	unsigned long flags;
 
@@ -7690,18 +7696,19 @@ static void sched_dl_do_global(void)
 	if (global_rt_runtime() != RUNTIME_INF)
 		new_bw = to_ratio(global_rt_period(), global_rt_runtime());
 
-	rcu_read_lock();
 	/*
 	 * FIXME: As above...
 	 */
 	for_each_possible_cpu(cpu) {
-		struct dl_bw *dl_b = dl_bw_of(cpu);
+		rcu_read_lock_sched();
+		dl_b = dl_bw_of(cpu);
 
 		raw_spin_lock_irqsave(&dl_b->lock, flags);
 		dl_b->bw = new_bw;
 		raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+
+		rcu_read_unlock_sched();
 	}
-	rcu_read_unlock();
 }
 
 static int sched_rt_global_validate(void)