author     Wanpeng Li <wanpeng.li@hotmail.com>    2016-04-22 05:07:24 -0400
committer  Ingo Molnar <mingo@kernel.org>         2016-04-28 04:39:54 -0400
commit     594dd290cf5403a9a5818619dfff42d8e8e0518e (patch)
tree       31fa5c3c77c6c5cc293a91bd85cef310f0b988e6 /kernel/sched
parent     fec148c000d0f9ac21679601722811eb60b4cc52 (diff)
sched/cpufreq: Optimize cpufreq update kicker to avoid updating multiple times
Sometimes delta_exec is 0 because update_curr() is called multiple times;
this is captured by:
u64 delta_exec = rq_clock_task(rq) - curr->se.exec_start;
This patch optimizes the cpufreq update kicker by bailing out when nothing
has changed. This will benefit the upcoming schedutil governor, which would
otherwise (over)react to the special util/max combination (a simplified,
standalone sketch of the resulting control flow follows the tags below).
Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: http://lkml.kernel.org/r/1461316044-9520-1-git-send-email-wanpeng.li@hotmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
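
To make the change concrete, here is a minimal, self-contained sketch of the
control flow the patch produces. It is not kernel code: task_clock, exec_start
and kick_cpufreq() are hypothetical stand-ins for rq_clock_task(rq),
curr->se.exec_start and cpufreq_trigger_update(), and the simplified
update_curr() mirrors update_curr_rt()/update_curr_dl() only in the ordering
of the bail-out and the kick.

/*
 * Standalone sketch (not kernel source): after the patch, the cpufreq
 * kick happens only when delta_exec is positive.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef int64_t s64;

static u64 task_clock = 1000;	/* stand-in for rq_clock_task(rq) */
static u64 exec_start = 1000;	/* stand-in for curr->se.exec_start */
static int kicks;		/* counts cpufreq updates */

static void kick_cpufreq(void)	/* stand-in for cpufreq_trigger_update() */
{
	kicks++;
}

static void update_curr(void)
{
	u64 delta_exec = task_clock - exec_start;

	if ((s64)delta_exec <= 0)
		return;		/* nothing ran since the last update: bail out */

	/* Kick cpufreq only when some time was actually consumed. */
	kick_cpufreq();
	exec_start = task_clock;
}

int main(void)
{
	update_curr();		/* delta_exec == 0: no kick */
	update_curr();		/* still 0: no kick */
	task_clock += 500;	/* the task clock advances */
	update_curr();		/* delta_exec > 0: one kick */
	printf("cpufreq kicks: %d\n", kicks);	/* prints 1 */
	return 0;
}

Before the patch the kick preceded the delta_exec check, so all three calls
above would have triggered a cpufreq update; with the bail-out first, only
the call that observes consumed time does.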
Diffstat (limited to 'kernel/sched')
 kernel/sched/deadline.c | 8 ++++----
 kernel/sched/rt.c       | 8 ++++----
 2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index affd97ec9f65..8f9b5af4e857 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -717,10 +717,6 @@ static void update_curr_dl(struct rq *rq)
 	if (!dl_task(curr) || !on_dl_rq(dl_se))
 		return;
 
-	/* Kick cpufreq (see the comment in linux/cpufreq.h). */
-	if (cpu_of(rq) == smp_processor_id())
-		cpufreq_trigger_update(rq_clock(rq));
-
 	/*
 	 * Consumed budget is computed considering the time as
 	 * observed by schedulable tasks (excluding time spent
@@ -736,6 +732,10 @@ static void update_curr_dl(struct rq *rq)
 		return;
 	}
 
+	/* kick cpufreq (see the comment in linux/cpufreq.h). */
+	if (cpu_of(rq) == smp_processor_id())
+		cpufreq_trigger_update(rq_clock(rq));
+
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
 
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c41ea7ac1764..19e13060fcd5 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -953,14 +953,14 @@ static void update_curr_rt(struct rq *rq)
 	if (curr->sched_class != &rt_sched_class)
 		return;
 
-	/* Kick cpufreq (see the comment in linux/cpufreq.h). */
-	if (cpu_of(rq) == smp_processor_id())
-		cpufreq_trigger_update(rq_clock(rq));
-
 	delta_exec = rq_clock_task(rq) - curr->se.exec_start;
 	if (unlikely((s64)delta_exec <= 0))
 		return;
 
+	/* Kick cpufreq (see the comment in linux/cpufreq.h). */
+	if (cpu_of(rq) == smp_processor_id())
+		cpufreq_trigger_update(rq_clock(rq));
+
 	schedstat_set(curr->se.statistics.exec_max,
 		      max(curr->se.statistics.exec_max, delta_exec));
 