author      Viresh Kumar <viresh.kumar@linaro.org>          2014-06-09 04:51:24 -0400
committer   Rafael J. Wysocki <rafael.j.wysocki@intel.com>  2014-06-09 06:58:21 -0400
commit      c8ae481b9a12f5cea080651ea87736104b111f8e (patch)
tree        72fd7b221acfa258ae47df42a930d6fe21c38878 /drivers/cpufreq
parent      18b46abd0009516c1973a57ccf4d01b9eaa3422a (diff)

cpufreq: governor: remove copy_prev_load from 'struct cpu_dbs_common_info'

'copy_prev_load' was recently added by commit 18b46ab (cpufreq: governor:
Be friendly towards latency-sensitive bursty workloads).

It is actually somewhat redundant, as we also have 'prev_load', which can
store any integer value and can be used instead of 'copy_prev_load' by
setting it to zero.

The true load can also turn out to be zero during long idle intervals (and
hence the actual value of 'prev_load' and the overloaded value can clash).
However, this is not a problem: if the true load really was zero in the
previous interval, it makes sense to evaluate the load afresh for the
current interval rather than copying the previous load.

So, drop 'copy_prev_load' and use 'prev_load' instead. Update the comments
as well to make this clearer.
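
To illustrate the overloading, here is a minimal standalone sketch (not the
kernel code itself; the helper compute_load() and the sample numbers are
made up for this example) of how a zero 'prev_load' doubles as the
"copy only once" flag:

#include <stdio.h>

/* 0 means "nothing to reuse"; any non-zero value is the stored load. */
static unsigned int prev_load;

static unsigned int compute_load(unsigned int wall_time,
                                 unsigned int idle_time,
                                 unsigned int sampling_rate)
{
        unsigned int load;

        if (wall_time > (2 * sampling_rate) && prev_load) {
                /* First sample after a long idle period: reuse the stored load once. */
                load = prev_load;
                prev_load = 0;  /* destructive copy: never reuse it twice */
        } else {
                load = 100 * (wall_time - idle_time) / wall_time;
                prev_load = load;
        }

        return load;
}

int main(void)
{
        /* Busy interval: load computed normally and remembered. */
        printf("%u\n", compute_load(10000, 2000, 10000));      /* prints 80 */
        /* Long idle interval: the remembered load (80) is reused once. */
        printf("%u\n", compute_load(50000, 49000, 10000));     /* prints 80 */
        /* Next long interval: prev_load is now 0, so the load is recomputed. */
        printf("%u\n", compute_load(50000, 49000, 10000));     /* prints 2 */
        return 0;
}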

There is another change here, which was probably missed by Srivatsa during
the last round of updates: the unlikely() in the 'if' statement covered
only half of the condition, whereas the whole condition should come under
it.

Also, checkpatch is quieted, as it was reporting this (with the --strict
option):
CHECK: Alignment should match open parenthesis
+ if (unlikely(wall_time > (2 * sampling_rate) &&
+ j_cdbs->prev_load)) {
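
For reference, this is the condition before and after the patch (taken from
the diff below; whitespace approximated):

Before:
        if (unlikely(wall_time > (2 * sampling_rate)) &&
                        j_cdbs->copy_prev_load) {

After:
        if (unlikely(wall_time > (2 * sampling_rate) &&
                     j_cdbs->prev_load)) {
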
Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
Reviewed-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/cpufreq_governor.c | 19
-rw-r--r--   drivers/cpufreq/cpufreq_governor.h |  9
2 files changed, 19 insertions(+), 9 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 9004450863be..1b44496b2d2b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -131,15 +131,25 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
                  * timer would not have fired during CPU-idle periods. Hence
                  * an unusually large 'wall_time' (as compared to the sampling
                  * rate) indicates this scenario.
+                 *
+                 * prev_load can be zero in two cases and we must recalculate it
+                 * for both cases:
+                 * - during long idle intervals
+                 * - explicitly set to zero
                  */
-                if (unlikely(wall_time > (2 * sampling_rate)) &&
-                                j_cdbs->copy_prev_load) {
+                if (unlikely(wall_time > (2 * sampling_rate) &&
+                             j_cdbs->prev_load)) {
                         load = j_cdbs->prev_load;
-                        j_cdbs->copy_prev_load = false;
+
+                        /*
+                         * Perform a destructive copy, to ensure that we copy
+                         * the previous load only once, upon the first wake-up
+                         * from idle.
+                         */
+                        j_cdbs->prev_load = 0;
                 } else {
                         load = 100 * (wall_time - idle_time) / wall_time;
                         j_cdbs->prev_load = load;
-                        j_cdbs->copy_prev_load = true;
                 }
 
                 if (load > max_load)
@@ -373,7 +383,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                                 (j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
                         j_cdbs->prev_load = 100 * prev_load /
                                         (unsigned int) j_cdbs->prev_cpu_wall;
-                        j_cdbs->copy_prev_load = true;
 
                         if (ignore_nice)
                                 j_cdbs->prev_cpu_nice =
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index c2a5b7e8070a..cc401d147e72 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -134,12 +134,13 @@ struct cpu_dbs_common_info {
         u64 prev_cpu_idle;
         u64 prev_cpu_wall;
         u64 prev_cpu_nice;
-        unsigned int prev_load;
         /*
-         * Flag to ensure that we copy the previous load only once, upon the
-         * first wake-up from idle.
+         * Used to keep track of load in the previous interval. However, when
+         * explicitly set to zero, it is used as a flag to ensure that we copy
+         * the previous load to the current interval only once, upon the first
+         * wake-up from idle.
          */
-        bool copy_prev_load;
+        unsigned int prev_load;
         struct cpufreq_policy *cur_policy;
         struct delayed_work work;
         /*