about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorJuri Lelli <juri.lelli@gmail.com>2014-02-11 03:24:27 -0500
committerThomas Gleixner <tglx@linutronix.de>2014-02-21 15:27:10 -0500
commit495163420ab5398c84af96ca3eae2c6aa4a140da (patch)
treeda1e39f0ddd9dfef62c8a4ede06a1a1451474a13 /kernel
parente9e7cb38c21c80c82af4b16608bb4c8c5ec6a28e (diff)
sched/core: Make dl_b->lock IRQ safe
Fix this lockdep warning:

[   44.804600] =========================================================
[   44.805746] [ INFO: possible irq lock inversion dependency detected ]
[   44.805746] 3.14.0-rc2-test+ #14 Not tainted
[   44.805746] ---------------------------------------------------------
[   44.805746] bash/3674 just changed the state of lock:
[   44.805746]  (&dl_b->lock){+.....}, at: [<ffffffff8106ad15>] sched_rt_handler+0x132/0x248
[   44.805746] but this lock was taken by another, HARDIRQ-safe lock in the past:
[   44.805746]  (&rq->lock){-.-.-.}
[   44.805746]
and interrupts could create inverse lock ordering between them.
[   44.805746]
[   44.805746] other info that might help us debug this:
[   44.805746]  Possible interrupt unsafe locking scenario:
[   44.805746]
[   44.805746]        CPU0                    CPU1
[   44.805746]        ----                    ----
[   44.805746]   lock(&dl_b->lock);
[   44.805746]                                local_irq_disable();
[   44.805746]                                lock(&rq->lock);
[   44.805746]                                lock(&dl_b->lock);
[   44.805746]  <Interrupt>
[   44.805746]    lock(&rq->lock);

by making dl_b->lock acquiring always IRQ safe.

Cc: Ingo Molnar <mingo@redhat.com>
Signed-off-by: Juri Lelli <juri.lelli@gmail.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1392107067-19907-3-git-send-email-juri.lelli@gmail.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/sched/core.c10
1 file changed, 6 insertions, 4 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 98d33c105252..33d030a133d2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7422,6 +7422,7 @@ static int sched_dl_global_constraints(void)
7422 u64 period = global_rt_period(); 7422 u64 period = global_rt_period();
7423 u64 new_bw = to_ratio(period, runtime); 7423 u64 new_bw = to_ratio(period, runtime);
7424 int cpu, ret = 0; 7424 int cpu, ret = 0;
7425 unsigned long flags;
7425 7426
7426 /* 7427 /*
7427 * Here we want to check the bandwidth not being set to some 7428 * Here we want to check the bandwidth not being set to some
@@ -7435,10 +7436,10 @@ static int sched_dl_global_constraints(void)
7435 for_each_possible_cpu(cpu) { 7436 for_each_possible_cpu(cpu) {
7436 struct dl_bw *dl_b = dl_bw_of(cpu); 7437 struct dl_bw *dl_b = dl_bw_of(cpu);
7437 7438
7438 raw_spin_lock(&dl_b->lock); 7439 raw_spin_lock_irqsave(&dl_b->lock, flags);
7439 if (new_bw < dl_b->total_bw) 7440 if (new_bw < dl_b->total_bw)
7440 ret = -EBUSY; 7441 ret = -EBUSY;
7441 raw_spin_unlock(&dl_b->lock); 7442 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7442 7443
7443 if (ret) 7444 if (ret)
7444 break; 7445 break;
@@ -7451,6 +7452,7 @@ static void sched_dl_do_global(void)
7451{ 7452{
7452 u64 new_bw = -1; 7453 u64 new_bw = -1;
7453 int cpu; 7454 int cpu;
7455 unsigned long flags;
7454 7456
7455 def_dl_bandwidth.dl_period = global_rt_period(); 7457 def_dl_bandwidth.dl_period = global_rt_period();
7456 def_dl_bandwidth.dl_runtime = global_rt_runtime(); 7458 def_dl_bandwidth.dl_runtime = global_rt_runtime();
@@ -7464,9 +7466,9 @@ static void sched_dl_do_global(void)
7464 for_each_possible_cpu(cpu) { 7466 for_each_possible_cpu(cpu) {
7465 struct dl_bw *dl_b = dl_bw_of(cpu); 7467 struct dl_bw *dl_b = dl_bw_of(cpu);
7466 7468
7467 raw_spin_lock(&dl_b->lock); 7469 raw_spin_lock_irqsave(&dl_b->lock, flags);
7468 dl_b->bw = new_bw; 7470 dl_b->bw = new_bw;
7469 raw_spin_unlock(&dl_b->lock); 7471 raw_spin_unlock_irqrestore(&dl_b->lock, flags);
7470 } 7472 }
7471} 7473}
7472 7474