author     Linus Torvalds <torvalds@linux-foundation.org>  2018-04-15 15:43:30 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-04-15 15:43:30 -0400
commit     71b8ebbf3d7bee88427eb207ef643f2f6447c625 (patch)
tree       e47dabc90f3e67c3f63b7aea7b76de92e7ccfa4c /kernel/sched
parent     174e719439b8224d7cedfbdd9529de396cac01ff (diff)
parent     317d359df95dd0cb7653d09b7fc513770590cf85 (diff)
Merge branch 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Thomas Gleixner:
"A few scheduler fixes:
- Prevent a bogus warning vs. runqueue clock update flags in
do_sched_rt_period_timer()
 - Simplify the helper functions which handle requests for skipping
   the runqueue clock update.
 - Do not unlock the tunables mutex in the error path of the cpu
   frequency scheduler utils. It's not held.
 - Enforce proper alignment for 'struct util_est' in sched_avg to
   prevent a misalignment fault on IA64" (an illustrative sketch of
   the alignment attribute follows the commit list below)
* 'sched-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
sched/core: Force proper alignment of 'struct util_est'
sched/core: Simplify helpers for rq clock update skip requests
sched/rt: Fix rq->clock_update_flags < RQCF_ACT_SKIP warning
sched/cpufreq/schedutil: Fix error path mutex unlock
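
Of the four fixes, the 'struct util_est' alignment change is the only one whose
diff lands outside kernel/sched, so it does not show up in the diffstat below.
As a rough illustration only (the struct name and fields here are stand-ins,
not the verbatim patch), forcing a small two-word struct onto a 64-bit boundary
so it can be read and written as one aligned u64 looks like this:

#include <stdio.h>

/*
 * Illustrative stand-in for the kernel's util_est: two 32-bit fields
 * that the scheduler wants to load/store as a single 64-bit word.  On
 * strict-alignment architectures such as IA64, doing that without a
 * guaranteed 8-byte alignment can raise a misalignment fault.
 */
struct util_est_like {
        unsigned int enqueued;
        unsigned int ewma;
} __attribute__((__aligned__(sizeof(unsigned long long))));

int main(void)
{
        printf("sizeof  = %zu\n", sizeof(struct util_est_like));
        printf("alignof = %zu\n", __alignof__(struct util_est_like));
        return 0;
}

With the attribute in place the compiler guarantees 8-byte alignment, so a
single 64-bit store to the whole struct cannot fault on strict-alignment
machines.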
Diffstat (limited to 'kernel/sched')
-rw-r--r--  kernel/sched/core.c               2
-rw-r--r--  kernel/sched/cpufreq_schedutil.c  3
-rw-r--r--  kernel/sched/deadline.c           2
-rw-r--r--  kernel/sched/fair.c               2
-rw-r--r--  kernel/sched/rt.c                 4
-rw-r--r--  kernel/sched/sched.h             17
6 files changed, 19 insertions, 11 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e8afd6086f23..5e10aaeebfcc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -874,7 +874,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
 	 * this case, we can save a useless back to back clock update.
 	 */
 	if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 2b124811947d..d2c6083304b4 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -631,10 +631,9 @@ fail:
 
 stop_kthread:
 	sugov_kthread_stop(sg_policy);
-
-free_sg_policy:
 	mutex_unlock(&global_tunables_lock);
 
+free_sg_policy:
 	sugov_policy_free(sg_policy);
 
 disable_fast_switch:
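
The bug this hunk fixes is an ordering problem in the goto-unwind error path:
the free_sg_policy label is also reached from earlier failure points that never
took global_tunables_lock, so the unlock has to sit above the label, on the one
path that actually holds the mutex. A minimal standalone sketch of that
discipline, using plain pthreads and hypothetical names (policy_init,
tunables_lock) rather than the real schedutil code:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t tunables_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical init function mirroring the error-path layout after the fix. */
static int policy_init(int fail_early, int fail_late)
{
        char *policy = malloc(64);

        if (!policy)
                return -1;

        if (fail_early)         /* lock not taken yet: must NOT unlock */
                goto free_policy;

        pthread_mutex_lock(&tunables_lock);

        if (fail_late)          /* lock held: release it before the shared label */
                goto fail_locked;

        pthread_mutex_unlock(&tunables_lock);
        return 0;

fail_locked:
        pthread_mutex_unlock(&tunables_lock);   /* unlock ABOVE the shared label */

free_policy:
        free(policy);           /* reached both with and without the lock taken */
        return -1;
}

int main(void)
{
        printf("early failure: %d\n", policy_init(1, 0));
        printf("late failure:  %d\n", policy_init(0, 1));
        printf("success:       %d\n", policy_init(0, 0));
        return 0;
}

Each label releases only what is guaranteed to be held by every jump that can
reach it, which is what moving mutex_unlock() above free_sg_policy restores in
the kernel code.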
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index d1c7bf7c7e5b..e7b3008b85bb 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1560,7 +1560,7 @@ static void yield_task_dl(struct rq *rq)
 	 * so we don't do microscopic update in schedule()
 	 * and double the fastpath cost.
 	 */
-	rq_clock_skip_update(rq, true);
+	rq_clock_skip_update(rq);
 }
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0951d1c58d2f..54dc31e7ab9b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7089,7 +7089,7 @@ static void yield_task_fair(struct rq *rq)
 		 * so we don't do microscopic update in schedule()
 		 * and double the fastpath cost.
 		 */
-		rq_clock_skip_update(rq, true);
+		rq_clock_skip_update(rq);
 	}
 
 	set_skip_buddy(se);
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 86b77987435e..7aef6b4e885a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -839,6 +839,8 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			continue;
 
 		raw_spin_lock(&rq->lock);
+		update_rq_clock(rq);
+
 		if (rt_rq->rt_time) {
 			u64 runtime;
 
@@ -859,7 +861,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
 			 * 'runtime'.
 			 */
 			if (rt_rq->rt_nr_running && rq->curr == rq->idle)
-				rq_clock_skip_update(rq, false);
+				rq_clock_cancel_skipupdate(rq);
 		}
 		if (rt_rq->rt_time || rt_rq->rt_nr_running)
 			idle = 0;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c3deaee7a7a2..15750c222ca2 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -976,13 +976,20 @@ static inline u64 rq_clock_task(struct rq *rq)
 	return rq->clock_task;
 }
 
-static inline void rq_clock_skip_update(struct rq *rq, bool skip)
+static inline void rq_clock_skip_update(struct rq *rq)
 {
 	lockdep_assert_held(&rq->lock);
-	if (skip)
-		rq->clock_update_flags |= RQCF_REQ_SKIP;
-	else
-		rq->clock_update_flags &= ~RQCF_REQ_SKIP;
+	rq->clock_update_flags |= RQCF_REQ_SKIP;
+}
+
+/*
+ * See rt task throttling, which is the only time a skip
+ * request is cancelled.
+ */
+static inline void rq_clock_cancel_skipupdate(struct rq *rq)
+{
+	lockdep_assert_held(&rq->lock);
+	rq->clock_update_flags &= ~RQCF_REQ_SKIP;
 }
 
 struct rq_flags {
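
Taken together, the sched.h hunk replaces the boolean-parameter helper with two
intention-revealing ones: callers that want the next clock update skipped call
rq_clock_skip_update(), and the single RT-throttling site that needs to
withdraw such a request calls rq_clock_cancel_skipupdate(). A small userspace
model of that handshake (the toy_rq struct and the condensed update function
are inventions for illustration; the real kernel tracks more states, such as
RQCF_ACT_SKIP and RQCF_UPDATED, and acts on the request in __schedule()):

#include <stdio.h>

#define RQCF_REQ_SKIP 0x01  /* a skip of the next clock update was requested */

struct toy_rq {
        unsigned int clock_update_flags;
        unsigned long clock;
};

/* Request that the next clock update be skipped (now unconditional). */
static void rq_clock_skip_update(struct toy_rq *rq)
{
        rq->clock_update_flags |= RQCF_REQ_SKIP;
}

/* Withdraw a pending skip request; only the RT throttling path does this. */
static void rq_clock_cancel_skipupdate(struct toy_rq *rq)
{
        rq->clock_update_flags &= ~RQCF_REQ_SKIP;
}

static void update_rq_clock(struct toy_rq *rq, unsigned long now)
{
        if (rq->clock_update_flags & RQCF_REQ_SKIP) {
                rq->clock_update_flags &= ~RQCF_REQ_SKIP;
                return;                         /* honour the skip once */
        }
        rq->clock = now;
}

int main(void)
{
        struct toy_rq rq = { 0, 0 };

        rq_clock_skip_update(&rq);              /* e.g. a yield just refreshed the clock */
        update_rq_clock(&rq, 100);              /* skipped */
        update_rq_clock(&rq, 200);              /* normal update */
        printf("clock = %lu\n", rq.clock);      /* 200 */

        rq_clock_skip_update(&rq);
        rq_clock_cancel_skipupdate(&rq);        /* throttling path changed its mind */
        update_rq_clock(&rq, 300);
        printf("clock = %lu\n", rq.clock);      /* 300 */
        return 0;
}

The point of the split is visible at the call sites in the diffs above: every
caller except the one in rt.c passed 'true', so the boolean parameter carried
no information at the point of call.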