diff options

-rw-r--r--  kernel/sched/core.c      2
-rw-r--r--  kernel/sched/deadline.c  2
-rw-r--r--  kernel/sched/fair.c      2
-rw-r--r--  kernel/sched/rt.c        2
-rw-r--r--  kernel/sched/sched.h    17

5 files changed, 16 insertions, 9 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 28b68995a417..550a07f648b6 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) | |||
874 | * this case, we can save a useless back to back clock update. | 874 | * this case, we can save a useless back to back clock update. |
875 | */ | 875 | */ |
876 | if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) | 876 | if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) |
877 | rq_clock_skip_update(rq, true); | 877 | rq_clock_skip_update(rq); |
878 | } | 878 | } |
879 | 879 | ||
880 | #ifdef CONFIG_SMP | 880 | #ifdef CONFIG_SMP |
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index d1c7bf7c7e5b..e7b3008b85bb 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c | |||
@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq) | |||
1560 | * so we don't do microscopic update in schedule() | 1560 | * so we don't do microscopic update in schedule() |
1561 | * and double the fastpath cost. | 1561 | * and double the fastpath cost. |
1562 | */ | 1562 | */ |
1563 | rq_clock_skip_update(rq, true); | 1563 | rq_clock_skip_update(rq); |
1564 | } | 1564 | } |
1565 | 1565 | ||
1566 | #ifdef CONFIG_SMP | 1566 | #ifdef CONFIG_SMP |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index 0951d1c58d2f..54dc31e7ab9b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq) | |||
7089 | * so we don't do microscopic update in schedule() | 7089 | * so we don't do microscopic update in schedule() |
7090 | * and double the fastpath cost. | 7090 | * and double the fastpath cost. |
7091 | */ | 7091 | */ |
7092 | rq_clock_skip_update(rq, true); | 7092 | rq_clock_skip_update(rq); |
7093 | } | 7093 | } |
7094 | 7094 | ||
7095 | set_skip_buddy(se); | 7095 | set_skip_buddy(se); |
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c index ad13e6242481..7aef6b4e885a 100644 --- a/kernel/sched/rt.c +++ b/kernel/sched/rt.c | |||
@@ -861,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) | |||
861 | * 'runtime'. | 861 | * 'runtime'. |
862 | */ | 862 | */ |
863 | if (rt_rq->rt_nr_running && rq->curr == rq->idle) | 863 | if (rt_rq->rt_nr_running && rq->curr == rq->idle) |
864 | rq_clock_skip_update(rq, false); | 864 | rq_clock_cancel_skipupdate(rq); |
865 | } | 865 | } |
866 | if (rt_rq->rt_time || rt_rq->rt_nr_running) | 866 | if (rt_rq->rt_time || rt_rq->rt_nr_running) |
867 | idle = 0; | 867 | idle = 0; |
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index c3deaee7a7a2..15750c222ca2 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h | |||
@@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq) | |||
976 | return rq->clock_task; | 976 | return rq->clock_task; |
977 | } | 977 | } |
978 | 978 | ||
979 | static inline void rq_clock_skip_update(struct rq *rq, bool skip) | 979 | static inline void rq_clock_skip_update(struct rq *rq) |
980 | { | 980 | { |
981 | lockdep_assert_held(&rq->lock); | 981 | lockdep_assert_held(&rq->lock); |
982 | if (skip) | 982 | rq->clock_update_flags |= RQCF_REQ_SKIP; |
983 | rq->clock_update_flags |= RQCF_REQ_SKIP; | 983 | } |
984 | else | 984 | |
985 | rq->clock_update_flags &= ~RQCF_REQ_SKIP; | 985 | /* |
986 | * See rt task throttling, which is the only time a skip | ||
987 | * request is cancelled. | ||
988 | */ | ||
989 | static inline void rq_clock_cancel_skipupdate(struct rq *rq) | ||
990 | { | ||
991 | lockdep_assert_held(&rq->lock); | ||
992 | rq->clock_update_flags &= ~RQCF_REQ_SKIP; | ||
986 | } | 993 | } |
987 | 994 | ||
988 | struct rq_flags { | 995 | struct rq_flags { |