author    Peter Zijlstra <peterz@infradead.org>    2015-06-11 08:46:40 -0400
committer Thomas Gleixner <tglx@linutronix.de>    2015-06-18 18:25:26 -0400
commit    8046d6806247088de5725eaf8a2580b29e50ac5a (patch)
tree      08f620cfc69d58effc57115fac4703d06e5ef2ef /kernel/sched/rt.c
parent    4c9a4bc89a9cca8128bce67d6bc8870d6b7ee0b2 (diff)
sched,rt: Remove return value from pull_rt_task()
In order to be able to use pull_rt_task() from a callback, we need to do
away with the return value.

Since the return value indicates if we should reschedule, do this inside
the function. Since not all callers currently do this, this can increase
the number of reschedules due to rt balancing.

Too many reschedules are not a correctness issue, too few are.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: ktkhai@parallels.com
Cc: rostedt@goodmis.org
Cc: juri.lelli@gmail.com
Cc: pang.xunlei@linaro.org
Cc: oleg@redhat.com
Cc: wanpeng.li@linux.intel.com
Cc: umgwanakikbuti@gmail.com
Link: http://lkml.kernel.org/r/20150611124742.679002000@infradead.org
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
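For illustration only, below is a minimal standalone sketch of the calling-convention change described above. The struct rq, rt_overloaded(), resched_curr() and try_pull_one_task() definitions are simplified stand-ins (not the real scheduler code) so the example compiles on its own; they serve purely to contrast the old caller-driven reschedule with the new internal one.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in types and helpers; the real kernel definitions differ. */
struct rq {
	int cpu;
	bool overloaded;
	bool curr_needs_resched;
};

static bool rt_overloaded(struct rq *rq)     { return rq->overloaded; }
static void resched_curr(struct rq *rq)      { rq->curr_needs_resched = true; }
static bool try_pull_one_task(struct rq *rq) { (void)rq; return true; }

/* Old shape: report back and let each caller decide to reschedule. */
static int pull_rt_task_old(struct rq *this_rq)
{
	if (!rt_overloaded(this_rq))
		return 0;
	return try_pull_one_task(this_rq) ? 1 : 0;
}

/*
 * New shape: the reschedule happens inside, so the function can be
 * invoked from a callback that has no use for a return value.
 */
static void pull_rt_task_new(struct rq *this_rq)
{
	bool resched = false;

	if (!rt_overloaded(this_rq))
		return;

	if (try_pull_one_task(this_rq))
		resched = true;

	if (resched)
		resched_curr(this_rq);
}

int main(void)
{
	struct rq rq = { .cpu = 0, .overloaded = true, .curr_needs_resched = false };

	/* Old caller pattern (what switched_from_rt() used to do): */
	if (pull_rt_task_old(&rq))
		resched_curr(&rq);

	/* New caller pattern: a plain call, also usable from a callback. */
	rq.curr_needs_resched = false;
	pull_rt_task_new(&rq);

	printf("resched requested: %d\n", rq.curr_needs_resched);
	return 0;
}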
Diffstat (limited to 'kernel/sched/rt.c')
-rw-r--r--   kernel/sched/rt.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 4f3726fe1246..c702b48de9f0 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -260,7 +260,7 @@ int alloc_rt_sched_group(struct task_group *tg, struct task_group *parent)
 
 #ifdef CONFIG_SMP
 
-static int pull_rt_task(struct rq *this_rq);
+static void pull_rt_task(struct rq *this_rq);
 
 static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 {
@@ -415,9 +415,8 @@ static inline bool need_pull_rt_task(struct rq *rq, struct task_struct *prev)
 	return false;
 }
 
-static inline int pull_rt_task(struct rq *this_rq)
+static inline void pull_rt_task(struct rq *this_rq)
 {
-	return 0;
 }
 
 static inline void queue_push_tasks(struct rq *rq)
@@ -1955,14 +1954,15 @@ static void push_irq_work_func(struct irq_work *work)
 }
 #endif /* HAVE_RT_PUSH_IPI */
 
-static int pull_rt_task(struct rq *this_rq)
+static void pull_rt_task(struct rq *this_rq)
 {
-	int this_cpu = this_rq->cpu, ret = 0, cpu;
+	int this_cpu = this_rq->cpu, cpu;
+	bool resched = false;
 	struct task_struct *p;
 	struct rq *src_rq;
 
 	if (likely(!rt_overloaded(this_rq)))
-		return 0;
+		return;
 
 	/*
 	 * Match the barrier from rt_set_overloaded; this guarantees that if we
@@ -1973,7 +1973,7 @@ static int pull_rt_task(struct rq *this_rq)
 #ifdef HAVE_RT_PUSH_IPI
 	if (sched_feat(RT_PUSH_IPI)) {
 		tell_cpu_to_push(this_rq);
-		return 0;
+		return;
 	}
 #endif
 
@@ -2026,7 +2026,7 @@ static int pull_rt_task(struct rq *this_rq)
 		if (p->prio < src_rq->curr->prio)
 			goto skip;
 
-		ret = 1;
+		resched = true;
 
 		deactivate_task(src_rq, p, 0);
 		set_task_cpu(p, this_cpu);
@@ -2042,7 +2042,8 @@ skip:
 		double_unlock_balance(this_rq, src_rq);
 	}
 
-	return ret;
+	if (resched)
+		resched_curr(this_rq);
 }
 
 /*
@@ -2138,8 +2139,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
 	if (!task_on_rq_queued(p) || rq->rt.rt_nr_running)
 		return;
 
-	if (pull_rt_task(rq))
-		resched_curr(rq);
+	pull_rt_task(rq);
 }
 
 void __init init_sched_rt_class(void)