Diffstat (limited to 'drivers/cpufreq/cpufreq_governor.c')
-rw-r--r--	drivers/cpufreq/cpufreq_governor.c	19 ++++++++++++++-----
1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 9004450863be..1b44496b2d2b 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -131,15 +131,25 @@ void dbs_check_cpu(struct dbs_data *dbs_data, int cpu)
 		 * timer would not have fired during CPU-idle periods. Hence
 		 * an unusually large 'wall_time' (as compared to the sampling
 		 * rate) indicates this scenario.
+		 *
+		 * prev_load can be zero in two cases and we must recalculate it
+		 * for both cases:
+		 * - during long idle intervals
+		 * - explicitly set to zero
 		 */
-		if (unlikely(wall_time > (2 * sampling_rate)) &&
-		    j_cdbs->copy_prev_load) {
+		if (unlikely(wall_time > (2 * sampling_rate) &&
+			     j_cdbs->prev_load)) {
 			load = j_cdbs->prev_load;
-			j_cdbs->copy_prev_load = false;
+
+			/*
+			 * Perform a destructive copy, to ensure that we copy
+			 * the previous load only once, upon the first wake-up
+			 * from idle.
+			 */
+			j_cdbs->prev_load = 0;
 		} else {
 			load = 100 * (wall_time - idle_time) / wall_time;
 			j_cdbs->prev_load = load;
-			j_cdbs->copy_prev_load = true;
 		}
 
 		if (load > max_load)
@@ -373,7 +383,6 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 				(j_cdbs->prev_cpu_wall - j_cdbs->prev_cpu_idle);
 			j_cdbs->prev_load = 100 * prev_load /
 					(unsigned int) j_cdbs->prev_cpu_wall;
-			j_cdbs->copy_prev_load = true;
 
 			if (ignore_nice)
 				j_cdbs->prev_cpu_nice =
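
For illustration only, below is a minimal user-space sketch of the destructive-copy idea from the first hunk: prev_load doubles as a one-shot cache that is consumed on the first wake-up after a long idle and then cleared, so a genuinely idle CPU is not reported as busy on later samples. The struct cpu_stats, compute_load() helper and the numbers in main() are hypothetical and are not part of the kernel code.

#include <stdio.h>

struct cpu_stats {
	unsigned int prev_load;	/* last computed load; 0 means "nothing cached" */
};

/*
 * Return the load to act on for this sample. When the wall time is much
 * larger than the sampling rate (i.e. we just woke from a long idle) and a
 * cached value exists, reuse it exactly once, then clear it.
 */
static unsigned int compute_load(struct cpu_stats *s, unsigned int wall_time,
				 unsigned int idle_time, unsigned int sampling_rate)
{
	unsigned int load;

	if (wall_time > 2 * sampling_rate && s->prev_load) {
		load = s->prev_load;
		s->prev_load = 0;	/* destructive copy: use the cache only once */
	} else {
		load = 100 * (wall_time - idle_time) / wall_time;
		s->prev_load = load;
	}

	return load;
}

int main(void)
{
	struct cpu_stats s = { .prev_load = 0 };

	/* Normal sample: wall time close to the sampling rate. */
	printf("busy sample:   %u%%\n", compute_load(&s, 10000, 2000, 10000));

	/* Wake-up after a long idle: the cached load is reused once... */
	printf("after idle #1: %u%%\n", compute_load(&s, 50000, 49000, 10000));

	/* ...but not twice; the real (low) load is reported from then on. */
	printf("after idle #2: %u%%\n", compute_load(&s, 50000, 49000, 10000));

	return 0;
}

With the made-up numbers above this prints 80%, 80%, then 2%, mirroring the behaviour the patch is after: one high-frequency sample granted to a task waking an idle CPU, without getting stuck at a stale high load.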