path: root/kernel/sched/rt.c
author	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-12 22:42:15 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-06-12 22:42:15 -0400
commit	b2e09f633a3994ee97fa6bc734b533d9c8e6ea0f (patch)
tree	8f398d3f7ac19a4f4d64862086597f335d977203 /kernel/sched/rt.c
parent	3737a12761636ebde0f09ef49daebb8eed18cc8a (diff)
parent	535560d841b2d54f31280e05e9c6ffd19da0c4e7 (diff)
Merge branch 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull more scheduler updates from Ingo Molnar:
 "Second round of scheduler changes:

   - try-to-wakeup and IPI reduction speedups, from Andy Lutomirski

   - continued power scheduling cleanups and refactorings, from
     Nicolas Pitre

   - misc fixes and enhancements"

* 'sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/deadline: Delete extraneous extern for to_ratio()
  sched/idle: Optimize try-to-wake-up IPI
  sched/idle: Simplify wake_up_idle_cpu()
  sched/idle: Clear polling before descheduling the idle thread
  sched, trace: Add a tracepoint for IPI-less remote wakeups
  cpuidle: Set polling in poll_idle
  sched: Remove redundant assignment to "rt_rq" in update_curr_rt(...)
  sched: Rename capacity related flags
  sched: Final power vs. capacity cleanups
  sched: Remove remaining dubious usage of "power"
  sched: Let 'struct sched_group_power' care about CPU capacity
  sched/fair: Disambiguate existing/remaining "capacity" usage
  sched/fair: Change "has_capacity" to "has_free_capacity"
  sched/fair: Remove "power" from 'struct numa_stats'
  sched: Fix signedness bug in yield_to()
  sched/fair: Use time_after() in record_wakee()
  sched/balancing: Reduce the rate of needless idle load balancing
  sched/fair: Fix unlocked reads of some cfs_b->quota/period
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--	kernel/sched/rt.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index b3512f1afce9..a49083192c64 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -918,7 +918,6 @@ static void update_curr_rt(struct rq *rq)
 {
 	struct task_struct *curr = rq->curr;
 	struct sched_rt_entity *rt_se = &curr->rt;
-	struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 	u64 delta_exec;
 
 	if (curr->sched_class != &rt_sched_class)
@@ -943,7 +942,7 @@ static void update_curr_rt(struct rq *rq)
 		return;
 
 	for_each_sched_rt_entity(rt_se) {
-		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
+		struct rt_rq *rt_rq = rt_rq_of_se(rt_se);
 
 	if (sched_rt_runtime(rt_rq) != RUNTIME_INF) {
 		raw_spin_lock(&rt_rq->rt_runtime_lock);
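For context, the cleanup shown above removes a function-scope initialization of rt_rq that was immediately overwritten on every pass of the for_each_sched_rt_entity() loop, and declares the variable inside the loop body instead. The following is a minimal standalone sketch of the same pattern, assuming made-up item/item_of() names in place of the kernel types; it is not the kernel code itself:

    #include <stdio.h>

    struct item { int value; };

    /* Hypothetical helper standing in for rt_rq_of_se(): derives the
     * per-iteration object from the loop cursor. */
    static struct item *item_of(int i, struct item *items)
    {
            return &items[i];
    }

    int main(void)
    {
            struct item items[3] = { {1}, {2}, {3} };
            int total = 0;

            /* Before the cleanup, the pattern looked like this: a
             * function-scope pointer initialized once, then overwritten
             * on every iteration, so the initial assignment is dead code:
             *
             *     struct item *it = item_of(0, items);
             *     for (int i = 0; i < 3; i++) {
             *             it = item_of(i, items);
             *             ...
             *     }
             *
             * After the cleanup, the pointer is declared where it is
             * first needed, inside the loop body, mirroring the
             * update_curr_rt() change. */
            for (int i = 0; i < 3; i++) {
                    struct item *it = item_of(i, items);
                    total += it->value;
            }

            printf("%d\n", total);
            return 0;
    }

Keeping the declaration in the loop body removes the dead initialization and makes it clear that the value is recomputed per iteration, which is the whole point of the one-line kernel patch.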