-rw-r--r--  include/linux/sched.h            |  6
-rw-r--r--  kernel/sched/core.c              |  2
-rw-r--r--  kernel/sched/cpufreq_schedutil.c |  3
-rw-r--r--  kernel/sched/deadline.c          |  2
-rw-r--r--  kernel/sched/fair.c              |  2
-rw-r--r--  kernel/sched/rt.c                |  4
-rw-r--r--  kernel/sched/sched.h             | 17
7 files changed, 22 insertions(+), 14 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f228c6033832..b3d697f3b573 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -300,7 +300,7 @@ struct util_est {
 	unsigned int		enqueued;
 	unsigned int		ewma;
 #define UTIL_EST_WEIGHT_SHIFT	2
-};
+} __attribute__((__aligned__(sizeof(u64))));
 
 /*
  * The load_avg/util_avg accumulates an infinite geometric series
@@ -364,7 +364,7 @@ struct sched_avg {
 	unsigned long		runnable_load_avg;
 	unsigned long		util_avg;
 	struct util_est		util_est;
-};
+} ____cacheline_aligned;
 
 struct sched_statistics {
 #ifdef CONFIG_SCHEDSTATS
@@ -435,7 +435,7 @@ struct sched_entity {
 	 * Put into separate cache line so it does not
 	 * collide with read-mostly values above.
 	 */
-	struct sched_avg	avg ____cacheline_aligned_in_smp;
+	struct sched_avg	avg;
 #endif
 };
 
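The three hunks above work together: struct util_est is forced to u64 alignment so its two 32-bit fields can be loaded and stored as a single 64-bit word, and the cacheline alignment moves from the avg member of struct sched_entity onto struct sched_avg itself, so every embedding of the type gets the isolation. A minimal userspace sketch of the same attributes follows; the 64-byte cache line and the *_sketch names are assumptions for illustration, not the kernel definitions.

/*
 * Standalone sketch (userspace C, not kernel code) of the alignment
 * attributes used in the hunks above. CACHELINE is an assumed stand-in
 * for the kernel's L1_CACHE_BYTES.
 */
#include <stdint.h>
#include <stdio.h>

#define CACHELINE 64	/* assumption: L1_CACHE_BYTES on many machines */

struct util_est_sketch {
	unsigned int enqueued;
	unsigned int ewma;
} __attribute__((__aligned__(sizeof(uint64_t))));

struct sched_avg_sketch {
	uint64_t last_update_time;
	unsigned long util_avg;
	struct util_est_sketch util_est;
} __attribute__((__aligned__(CACHELINE)));

int main(void)
{
	/* 8-byte alignment lets enqueued+ewma be accessed as one u64. */
	printf("util_est  alignment: %zu\n", _Alignof(struct util_est_sketch));
	/* Aligning the type itself keeps it off the cache line of any
	 * read-mostly fields that precede it in an embedding struct. */
	printf("sched_avg alignment: %zu\n", _Alignof(struct sched_avg_sketch));
	return 0;
}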
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e8afd6086f23..5e10aaeebfcc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 2b124811947d..d2c6083304b4 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -631,10 +631,9 @@ fail:
 
 stop_kthread:
 	sugov_kthread_stop(sg_policy);
-
-free_sg_policy:
 	mutex_unlock(&global_tunables_lock);
 
+free_sg_policy:
 	sugov_policy_free(sg_policy);
 
 disable_fast_switch:
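The schedutil hunk is a label-ordering fix: the unlock moves up into the stop_kthread block, which is only entered with global_tunables_lock held, presumably because free_sg_policy is also reachable from an earlier failure point where the lock is not yet taken. A hedged userspace sketch of the pattern follows; the failure flags and the pthread mutex are illustrative, not the schedutil code.

/*
 * Illustrative sketch of goto-unwind error handling: a later cleanup
 * label must not perform an unlock that only an earlier entry point
 * is entitled to.
 */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t global_tunables_lock = PTHREAD_MUTEX_INITIALIZER;

static int sugov_init_sketch(int fail_early, int fail_late)
{
	void *sg_policy = malloc(64);

	if (!sg_policy)
		return -1;

	if (fail_early)		/* lock not taken yet: must not unlock */
		goto free_sg_policy;

	pthread_mutex_lock(&global_tunables_lock);

	if (fail_late)		/* lock held: unlock on the way out */
		goto stop_kthread;

	pthread_mutex_unlock(&global_tunables_lock);
	return 0;

stop_kthread:
	/* ...stop the helper thread... */
	pthread_mutex_unlock(&global_tunables_lock);

free_sg_policy:			/* reachable with and without the lock */
	free(sg_policy);
	return -1;
}

int main(void)
{
	return sugov_init_sketch(0, 1);	/* exercise the locked failure path */
}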
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d1c7bf7c7e5b..e7b3008b85bb 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq)
 	 * so we don't do microscopic update in schedule()
 	 * and double the fastpath cost.
 	 */
-	rq_clock_skip_update(rq, true);
+	rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0951d1c58d2f..54dc31e7ab9b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 	}
 
 	set_skip_buddy(se);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 86b77987435e..7aef6b4e885a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -839,6 +839,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			continue;
 
 		raw_spin_lock(&rq->lock);
+		update_rq_clock(rq);
+
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
@@ -859,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			 * 'runtime'.
 			 */
 			if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-				rq_clock_skip_update(rq, false);
+				rq_clock_cancel_skipupdate(rq);
 		}
 		if (rt_rq->rt_time || rt_rq->rt_nr_running)
 			idle = 0;
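The rt.c hunks do two things: they refresh the rq clock immediately after taking rq->lock, so the throttling accounting that follows reads a current timestamp, and they replace the old skip=false call with the new cancel helper. A small sketch of the lock-then-update-clock pattern follows; the *_sketch names and the CLOCK_MONOTONIC time source are assumptions, not kernel code.

/*
 * Illustrative userspace sketch: refresh the clock once right after
 * entering the critical section, so all accounting in it sees one
 * consistent timestamp instead of a stale rq->clock.
 */
#include <stdint.h>
#include <time.h>

struct rq_sketch {
	uint64_t clock;			/* stands in for rq->clock */
};

static uint64_t sched_clock_sketch(void)	/* assumed time source */
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

static void update_rq_clock_sketch(struct rq_sketch *rq)
{
	rq->clock = sched_clock_sketch();
}

static void period_timer_body(struct rq_sketch *rq)
{
	/* caller holds the rq lock here */
	update_rq_clock_sketch(rq);	/* the line the hunk adds */
	/* ...rt_time/runtime accounting reads rq->clock from here on... */
}

int main(void)
{
	struct rq_sketch rq = { 0 };

	period_timer_body(&rq);
	return 0;
}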
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c3deaee7a7a2..15750c222ca2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+	rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
 {
 	lockdep_assert_held(&rq->lock);
-	if (skip)
-		rq->clock_update_flags |= RQCF_REQ_SKIP;
-	else
-		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 }
 
 struct rq_flags {
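With the bool parameter gone, requesting and cancelling a clock-skip are two separate, self-documenting helpers, which is what the call-site changes in core.c, deadline.c, fair.c and rt.c above consume. A standalone model of the new API follows; struct rq_model and main() are assumptions for illustration, while RQCF_REQ_SKIP's value matches the kernel's 0x01.

/*
 * Minimal userspace model (not kernel code) of the simplified helpers
 * above; the lockdep assertion is omitted for brevity.
 */
#include <stdio.h>

#define RQCF_REQ_SKIP	0x01

struct rq_model {
	unsigned int clock_update_flags;
};

/* Caller asks for the next update_rq_clock() to be skipped. */
static inline void rq_clock_skip_update(struct rq_model *rq)
{
	rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/* Only RT throttling cancels a pending skip request. */
static inline void rq_clock_cancel_skipupdate(struct rq_model *rq)
{
	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

int main(void)
{
	struct rq_model rq = { 0 };

	rq_clock_skip_update(&rq);		/* e.g. yield_task_fair() */
	rq_clock_cancel_skipupdate(&rq);	/* e.g. do_sched_rt_period_timer() */
	printf("flags: %#x\n", rq.clock_update_flags);
	return 0;
}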