author    Davidlohr Bueso <dave@stgolabs.net>    2018-04-04 12:15:39 -0400
committer Ingo Molnar <mingo@kernel.org>         2018-04-05 03:20:46 -0400
commit    adcc8da8859bee9548bb6d323b1e8de8a7252acd (patch)
tree      c2679a00cb4f45372763140d624c33285a70d1fd /kernel/sched
parent    d29a20645d5e929aa7e8616f28e5d8e1c49263ec (diff)
sched/core: Simplify helpers for rq clock update skip requests
By renaming the functions we can get rid of the skip parameter
and have better code readability. It makes zero sense to have
things such as:

	rq_clock_skip_update(rq, false)

when the skip request is in fact not going to happen. Ever. Rename
things such that we end up with:

	rq_clock_skip_update(rq)
	rq_clock_cancel_skipupdate(rq)

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-kernel@vger.kernel.org
Cc: matt@codeblueprint.co.uk
Cc: rostedt@goodmis.org
Link: http://lkml.kernel.org/r/20180404161539.nhadkff2aats74jh@linux-n805
Signed-off-by: Ingo Molnar <mingo@kernel.org>
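The skip request itself is only a hint; it takes effect one scheduling step
later. For context (not part of this patch), the consuming side in
kernel/sched/core.c of this era works roughly as in the sketch below:
__schedule() shifts the flags left, which promotes a pending RQCF_REQ_SKIP
(0x01) into RQCF_ACT_SKIP (0x02), and update_rq_clock() then returns early,
saving the back-to-back clock update the changelog refers to. This is a
paraphrase of the surrounding kernel code, not a quote:

	void update_rq_clock(struct rq *rq)
	{
		s64 delta;

		lockdep_assert_held(&rq->lock);

		/* A promoted skip request suppresses this update entirely. */
		if (rq->clock_update_flags & RQCF_ACT_SKIP)
			return;

		delta = sched_clock_cpu(cpu_of(rq)) - rq->clock;
		if (delta < 0)
			return;
		rq->clock += delta;
		update_rq_clock_task(rq, delta);
	}

	/* In __schedule(): promote REQ_SKIP (0x01) to ACT_SKIP (0x02). */
	rq->clock_update_flags <<= 1;
	update_rq_clock(rq);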
Diffstat (limited to 'kernel/sched')
 kernel/sched/core.c     |  2 +-
 kernel/sched/deadline.c |  2 +-
 kernel/sched/fair.c     |  2 +-
 kernel/sched/rt.c       |  2 +-
 kernel/sched/sched.h    | 17 ++++++++++++-----
 5 files changed, 16 insertions(+), 9 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 28b68995a417..550a07f648b6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d1c7bf7c7e5b..e7b3008b85bb 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq)
 	 * so we don't do microscopic update in schedule()
 	 * and double the fastpath cost.
 	 */
-	rq_clock_skip_update(rq, true);
+	rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0951d1c58d2f..54dc31e7ab9b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 	}
 
 	set_skip_buddy(se);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ad13e6242481..7aef6b4e885a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -861,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			 * 'runtime'.
 			 */
 			if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-				rq_clock_skip_update(rq, false);
+				rq_clock_cancel_skipupdate(rq);
 		}
 		if (rt_rq->rt_time || rt_rq->rt_nr_running)
 			idle = 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c3deaee7a7a2..15750c222ca2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
 {
 	lockdep_assert_held(&rq->lock);
-	if (skip)
-		rq->clock_update_flags |= RQCF_REQ_SKIP;
-	else
-		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+	rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 }
 
 struct rq_flags {
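Taken together, the new interface boils down to a set/clear pair over a
single flag bit. A minimal standalone sketch (struct rq is stubbed here for
illustration; the real helpers in kernel/sched/sched.h also assert that
rq->lock is held):

	#define RQCF_REQ_SKIP	0x01

	struct rq {
		unsigned int clock_update_flags;
		/* the real struct rq has many more fields */
	};

	/* Request that the next rq clock update be skipped. */
	static inline void rq_clock_skip_update(struct rq *rq)
	{
		rq->clock_update_flags |= RQCF_REQ_SKIP;
	}

	/* Cancel a pending skip request; only rt task throttling does this. */
	static inline void rq_clock_cancel_skipupdate(struct rq *rq)
	{
		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
	}

Splitting set and clear into two names makes each call site self-describing:
a reader no longer has to decode a boolean to know whether a skip is being
requested or cancelled.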