author     Tim Chen <tim.c.chen@linux.intel.com>    2014-12-12 18:38:12 -0500
committer  Ingo Molnar <mingo@kernel.org>           2015-01-30 13:38:49 -0500
commit     80e3d87b2c5582db0ab5e39610ce3707d97ba409 (patch)
tree       26ce03eb8edeebe4eca36d6be4de3c35e7494279 /kernel/sched
parent     3847b272248a3a4ed70d20392cc0454917f7713b (diff)
sched/rt: Reduce rq lock contention by eliminating locking of non-feasible target
This patch adds checks that prevent futile attempts to move rt tasks
to a CPU with active tasks of equal or higher priority.
This reduces run queue lock contention and improves the performance of
a well-known OLTP benchmark by 0.7%.
Signed-off-by: Tim Chen <tim.c.chen@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Shawn Bohrer <sbohrer@rgmadvisors.com>
Cc: Suruchi Kadu <suruchi.a.kadu@intel.com>
Cc: Doug Nelson <doug.nelson@intel.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1421430374.2399.27.camel@schen9-desk2.jf.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/sched')
-rw-r--r--   kernel/sched/rt.c   17
1 file changed, 16 insertions, 1 deletion
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 6725e3c49660..f4d4b077eba0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1340,7 +1340,12 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 	     curr->prio <= p->prio)) {
 		int target = find_lowest_rq(p);
 
-		if (target != -1)
+		/*
+		 * Don't bother moving it if the destination CPU is
+		 * not running a lower priority task.
+		 */
+		if (target != -1 &&
+		    p->prio < cpu_rq(target)->rt.highest_prio.curr)
 			cpu = target;
 	}
 	rcu_read_unlock();
@@ -1617,6 +1622,16 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
 
 		lowest_rq = cpu_rq(cpu);
 
+		if (lowest_rq->rt.highest_prio.curr <= task->prio) {
+			/*
+			 * Target rq has tasks of equal or higher priority,
+			 * retrying does not release any lock and is unlikely
+			 * to yield a different result.
+			 */
+			lowest_rq = NULL;
+			break;
+		}
+
 		/* if the prio of this runqueue changed, try again */
 		if (double_lock_balance(rq, lowest_rq)) {
 			/*
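For readers following along outside the kernel tree, here is a minimal standalone sketch of the feasibility test both hunks apply. The struct, function names, and priority values below are illustrative stand-ins, not kernel APIs: lower numeric prio means higher priority, so pushing is only worth taking the target's rq lock when the task's prio is strictly below the target's highest queued RT prio.

/* Standalone illustration of the check added by this patch; not kernel code. */
#include <stdbool.h>
#include <stdio.h>

struct toy_rq {
	/* prio value of the highest-priority queued RT task
	 * (lower number = higher priority) */
	int highest_prio;
};

/* Mirrors the new test: push only if the target runs strictly
 * lower-priority work than the task being placed. */
static bool push_is_feasible(const struct toy_rq *target, int task_prio)
{
	return task_prio < target->highest_prio;
}

int main(void)
{
	struct toy_rq busy  = { .highest_prio = 10 };  /* already running a prio-10 RT task */
	struct toy_rq quiet = { .highest_prio = 100 }; /* no RT work queued (illustrative value) */

	printf("push prio-20 task to busy rq?  %s\n",
	       push_is_feasible(&busy, 20) ? "yes" : "no");  /* no: futile, skip the lock */
	printf("push prio-20 task to quiet rq? %s\n",
	       push_is_feasible(&quiet, 20) ? "yes" : "no"); /* yes */
	return 0;
}

Compiled and run, this prints "no" for the busy runqueue and "yes" for the quiet one; the "no" case is exactly where the patch now avoids taking (or retrying) the remote rq lock.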