 drivers/cpufreq/cpufreq_conservative.c | 38
 drivers/cpufreq/cpufreq_governor.c     | 37
 drivers/cpufreq/cpufreq_governor.h     | 14
 drivers/cpufreq/cpufreq_ondemand.c     | 41
 4 files changed, 47 insertions(+), 83 deletions(-)
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index a69eb7eae7ec..4f640b028c94 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -60,7 +60,7 @@ static void cs_check_cpu(int cpu, unsigned int load)
 		return;
 
 	/* Check for frequency increase */
-	if (load > cs_tuners->up_threshold) {
+	if (load > dbs_data->up_threshold) {
 		dbs_info->down_skip = 0;
 
 		/* if we are already at full speed then break out early */
@@ -78,7 +78,7 @@ static void cs_check_cpu(int cpu, unsigned int load)
 	}
 
 	/* if sampling_down_factor is active break out early */
-	if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
+	if (++dbs_info->down_skip < dbs_data->sampling_down_factor)
 		return;
 	dbs_info->down_skip = 0;
 
@@ -107,10 +107,9 @@ static unsigned int cs_dbs_timer(struct cpufreq_policy *policy)
 {
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct dbs_data *dbs_data = policy_dbs->dbs_data;
-	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 
 	dbs_check_cpu(policy);
-	return delay_for_sampling_rate(cs_tuners->sampling_rate);
+	return delay_for_sampling_rate(dbs_data->sampling_rate);
 }
 
 static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
@@ -126,7 +125,6 @@ static struct dbs_governor cs_dbs_gov;
 static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
 					  const char *buf, size_t count)
 {
-	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -134,14 +132,13 @@ static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
 
-	cs_tuners->sampling_down_factor = input;
+	dbs_data->sampling_down_factor = input;
 	return count;
 }
 
 static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
 				   size_t count)
 {
-	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -149,7 +146,7 @@ static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
 	if (ret != 1)
 		return -EINVAL;
 
-	cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
+	dbs_data->sampling_rate = max(input, dbs_data->min_sampling_rate);
 	return count;
 }
 
@@ -164,7 +161,7 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
 	if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
 		return -EINVAL;
 
-	cs_tuners->up_threshold = input;
+	dbs_data->up_threshold = input;
 	return count;
 }
 
@@ -178,7 +175,7 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
 
 	/* cannot be lower than 11 otherwise freq will not fall */
 	if (ret != 1 || input < 11 || input > 100 ||
-			input >= cs_tuners->up_threshold)
+			input >= dbs_data->up_threshold)
 		return -EINVAL;
 
 	cs_tuners->down_threshold = input;
@@ -188,7 +185,6 @@ static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
 static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
 				      const char *buf, size_t count)
 {
-	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
 	unsigned int input, j;
 	int ret;
 
@@ -199,10 +195,10 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
 	if (input > 1)
 		input = 1;
 
-	if (input == cs_tuners->ignore_nice_load) /* nothing to do */
+	if (input == dbs_data->ignore_nice_load) /* nothing to do */
 		return count;
 
-	cs_tuners->ignore_nice_load = input;
+	dbs_data->ignore_nice_load = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
@@ -210,7 +206,7 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
 		dbs_info = &per_cpu(cs_cpu_dbs_info, j);
 		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
 					&dbs_info->cdbs.prev_cpu_wall, 0);
-		if (cs_tuners->ignore_nice_load)
+		if (dbs_data->ignore_nice_load)
 			dbs_info->cdbs.prev_cpu_nice =
 				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 	}
@@ -239,12 +235,12 @@ static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
 	return count;
 }
 
-show_store_one(cs, sampling_rate);
-show_store_one(cs, sampling_down_factor);
-show_store_one(cs, up_threshold);
 show_store_one(cs, down_threshold);
-show_store_one(cs, ignore_nice_load);
 show_store_one(cs, freq_step);
+show_store_one_common(cs, sampling_rate);
+show_store_one_common(cs, sampling_down_factor);
+show_store_one_common(cs, up_threshold);
+show_store_one_common(cs, ignore_nice_load);
 show_one_common(cs, min_sampling_rate);
 
 gov_sys_pol_attr_rw(sampling_rate);
@@ -299,11 +295,11 @@ static int cs_init(struct dbs_data *dbs_data, bool notify)
 		return -ENOMEM;
 	}
 
-	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
 	tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
-	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
-	tuners->ignore_nice_load = 0;
 	tuners->freq_step = DEF_FREQUENCY_STEP;
+	dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+	dbs_data->ignore_nice_load = 0;
 
 	dbs_data->tuners = tuners;
 	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index d6bd402a3237..3569782771ef 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -38,10 +38,9 @@ void dbs_check_cpu(struct cpufreq_policy *policy)
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct dbs_data *dbs_data = policy_dbs->dbs_data;
 	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-	struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-	unsigned int sampling_rate;
+	unsigned int sampling_rate = dbs_data->sampling_rate;
+	unsigned int ignore_nice = dbs_data->ignore_nice_load;
 	unsigned int max_load = 0;
-	unsigned int ignore_nice;
 	unsigned int j;
 
 	if (gov->governor == GOV_ONDEMAND) {
@@ -54,13 +53,8 @@ void dbs_check_cpu(struct cpufreq_policy *policy)
 		 * the 'sampling_rate', so as to keep the wake-up-from-idle
 		 * detection logic a bit conservative.
 		 */
-		sampling_rate = od_tuners->sampling_rate;
 		sampling_rate *= od_dbs_info->rate_mult;
 
-		ignore_nice = od_tuners->ignore_nice_load;
-	} else {
-		sampling_rate = cs_tuners->sampling_rate;
-		ignore_nice = cs_tuners->ignore_nice_load;
 	}
 
 	/* Get Absolute Load */
@@ -280,19 +274,6 @@ static void dbs_update_util_handler(struct update_util_data *data, u64 time,
 	atomic_dec(&policy_dbs->work_count);
 }
 
-static void set_sampling_rate(struct dbs_data *dbs_data,
-			      struct dbs_governor *gov,
-			      unsigned int sampling_rate)
-{
-	if (gov->governor == GOV_CONSERVATIVE) {
-		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
-		cs_tuners->sampling_rate = sampling_rate;
-	} else {
-		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
-		od_tuners->sampling_rate = sampling_rate;
-	}
-}
-
 static struct policy_dbs_info *alloc_policy_dbs_info(struct cpufreq_policy *policy,
 						     struct dbs_governor *gov)
 {
@@ -384,8 +365,8 @@ static int cpufreq_governor_init(struct cpufreq_policy *policy)
 	/* Bring kernel and HW constraints together */
 	dbs_data->min_sampling_rate = max(dbs_data->min_sampling_rate,
 					  MIN_LATENCY_MULTIPLIER * latency);
-	set_sampling_rate(dbs_data, gov, max(dbs_data->min_sampling_rate,
-					     latency * LATENCY_MULTIPLIER));
+	dbs_data->sampling_rate = max(dbs_data->min_sampling_rate,
+				      LATENCY_MULTIPLIER * latency);
 
 	if (!have_governor_per_policy())
 		gov->gdbs_data = dbs_data;
@@ -456,16 +437,12 @@ static int cpufreq_governor_start(struct cpufreq_policy *policy)
 	if (policy_dbs->policy)
 		return -EBUSY;
 
-	if (gov->governor == GOV_CONSERVATIVE) {
-		struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+	sampling_rate = dbs_data->sampling_rate;
+	ignore_nice = dbs_data->ignore_nice_load;
 
-		sampling_rate = cs_tuners->sampling_rate;
-		ignore_nice = cs_tuners->ignore_nice_load;
-	} else {
+	if (gov->governor == GOV_ONDEMAND) {
 		struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 
-		sampling_rate = od_tuners->sampling_rate;
-		ignore_nice = od_tuners->ignore_nice_load;
 		io_busy = od_tuners->io_is_busy;
 	}
 
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index cdf7536ac5fb..e296362d21d2 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -153,9 +153,13 @@ static void *get_cpu_dbs_info_s(int cpu) \
 
 /* Governor demand based switching data (per-policy or global). */
 struct dbs_data {
-	unsigned int min_sampling_rate;
 	int usage_count;
 	void *tuners;
+	unsigned int min_sampling_rate;
+	unsigned int ignore_nice_load;
+	unsigned int sampling_rate;
+	unsigned int sampling_down_factor;
+	unsigned int up_threshold;
 };
 
 /* Common to all CPUs of a policy */
@@ -216,19 +220,11 @@ struct cs_cpu_dbs_info_s {
 
 /* Per policy Governors sysfs tunables */
 struct od_dbs_tuners {
-	unsigned int ignore_nice_load;
-	unsigned int sampling_rate;
-	unsigned int sampling_down_factor;
-	unsigned int up_threshold;
 	unsigned int powersave_bias;
 	unsigned int io_is_busy;
 };
 
 struct cs_dbs_tuners {
-	unsigned int ignore_nice_load;
-	unsigned int sampling_rate;
-	unsigned int sampling_down_factor;
-	unsigned int up_threshold;
 	unsigned int down_threshold;
 	unsigned int freq_step;
 };
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 8c44bc3fffc5..13c64b662fa1 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -110,7 +110,7 @@ static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy,
 		dbs_info->freq_lo_jiffies = 0;
 		return freq_lo;
 	}
-	jiffies_total = usecs_to_jiffies(od_tuners->sampling_rate);
+	jiffies_total = usecs_to_jiffies(dbs_data->sampling_rate);
 	jiffies_hi = (freq_avg - freq_lo) * jiffies_total;
 	jiffies_hi += ((freq_hi - freq_lo) / 2);
 	jiffies_hi /= (freq_hi - freq_lo);
@@ -161,11 +161,10 @@ static void od_check_cpu(int cpu, unsigned int load)
 	dbs_info->freq_lo = 0;
 
 	/* Check for frequency increase */
-	if (load > od_tuners->up_threshold) {
+	if (load > dbs_data->up_threshold) {
 		/* If switching to max speed, apply sampling_down_factor */
 		if (policy->cur < policy->max)
-			dbs_info->rate_mult =
-				od_tuners->sampling_down_factor;
+			dbs_info->rate_mult = dbs_data->sampling_down_factor;
 		dbs_freq_increase(policy, policy->max);
 	} else {
 		/* Calculate the next frequency proportional to load */
@@ -195,7 +194,6 @@ static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
 	struct policy_dbs_info *policy_dbs = policy->governor_data;
 	struct dbs_data *dbs_data = policy_dbs->dbs_data;
 	struct od_cpu_dbs_info_s *dbs_info = &per_cpu(od_cpu_dbs_info, policy->cpu);
-	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	int delay = 0, sample_type = dbs_info->sample_type;
 
 	/* Common NORMAL_SAMPLE setup */
@@ -214,7 +212,7 @@ static unsigned int od_dbs_timer(struct cpufreq_policy *policy)
 	}
 
 	if (!delay)
-		delay = delay_for_sampling_rate(od_tuners->sampling_rate
+		delay = delay_for_sampling_rate(dbs_data->sampling_rate
 				* dbs_info->rate_mult);
 
 	return delay;
@@ -239,11 +237,10 @@ static struct dbs_governor od_dbs_gov;
 static void update_sampling_rate(struct dbs_data *dbs_data,
 		unsigned int new_rate)
 {
-	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	struct cpumask cpumask;
 	int cpu;
 
-	od_tuners->sampling_rate = new_rate = max(new_rate,
+	dbs_data->sampling_rate = new_rate = max(new_rate,
 			dbs_data->min_sampling_rate);
 
 	/*
@@ -348,7 +345,6 @@ static ssize_t store_io_is_busy(struct dbs_data *dbs_data, const char *buf,
 static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
 		size_t count)
 {
-	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
@@ -358,21 +354,20 @@ static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
 		return -EINVAL;
 	}
 
-	od_tuners->up_threshold = input;
+	dbs_data->up_threshold = input;
 	return count;
 }
 
 static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
 		const char *buf, size_t count)
 {
-	struct od_dbs_tuners *od_tuners = dbs_data->tuners;
 	unsigned int input, j;
 	int ret;
 	ret = sscanf(buf, "%u", &input);
 
 	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
 		return -EINVAL;
-	od_tuners->sampling_down_factor = input;
+	dbs_data->sampling_down_factor = input;
 
 	/* Reset down sampling multiplier in case it was active */
 	for_each_online_cpu(j) {
@@ -399,10 +394,10 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
 	if (input > 1)
 		input = 1;
 
-	if (input == od_tuners->ignore_nice_load) { /* nothing to do */
+	if (input == dbs_data->ignore_nice_load) { /* nothing to do */
 		return count;
 	}
-	od_tuners->ignore_nice_load = input;
+	dbs_data->ignore_nice_load = input;
 
 	/* we need to re-evaluate prev_cpu_idle */
 	for_each_online_cpu(j) {
@@ -410,7 +405,7 @@ static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
 		dbs_info = &per_cpu(od_cpu_dbs_info, j);
 		dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
 			&dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
-		if (od_tuners->ignore_nice_load)
+		if (dbs_data->ignore_nice_load)
 			dbs_info->cdbs.prev_cpu_nice =
 				kcpustat_cpu(j).cpustat[CPUTIME_NICE];
 
@@ -437,12 +432,12 @@ static ssize_t store_powersave_bias(struct dbs_data *dbs_data, const char *buf,
 	return count;
 }
 
-show_store_one(od, sampling_rate);
 show_store_one(od, io_is_busy);
-show_store_one(od, up_threshold);
-show_store_one(od, sampling_down_factor);
-show_store_one(od, ignore_nice_load);
 show_store_one(od, powersave_bias);
+show_store_one_common(od, sampling_rate);
+show_store_one_common(od, up_threshold);
+show_store_one_common(od, sampling_down_factor);
+show_store_one_common(od, ignore_nice_load);
 show_one_common(od, min_sampling_rate);
 
 gov_sys_pol_attr_rw(sampling_rate);
@@ -504,7 +499,7 @@ static int od_init(struct dbs_data *dbs_data, bool notify)
 	put_cpu();
 	if (idle_time != -1ULL) {
 		/* Idle micro accounting is supported. Use finer thresholds */
-		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+		dbs_data->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
 		/*
 		 * In nohz/micro accounting case we set the minimum frequency
 		 * not depending on HZ, but fixed (very low). The deferred
@@ -512,15 +507,15 @@ static int od_init(struct dbs_data *dbs_data, bool notify)
 		 */
 		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
 	} else {
-		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+		dbs_data->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
 
 		/* For correct statistics, we need 10 ticks for each measure */
 		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
 			jiffies_to_usecs(10);
 	}
 
-	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
-	tuners->ignore_nice_load = 0;
+	dbs_data->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+	dbs_data->ignore_nice_load = 0;
 	tuners->powersave_bias = default_powersave_bias;
 	tuners->io_is_busy = should_io_be_busy();
 
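
The net effect of the patch is easier to see outside the diff: the tunables shared by ondemand and conservative now live in struct dbs_data itself, and the per-governor tuner structs keep only their own knobs, so common code no longer needs cs_tuners/od_tuners casts or GOV_ONDEMAND/GOV_CONSERVATIVE branching to reach them. Below is a minimal, standalone C sketch of the resulting layout, not kernel code: the field sets are taken from the hunks above, while next_delay_us() and the example values are hypothetical stand-ins for delay_for_sampling_rate() and real tunable settings.

```c
#include <stdio.h>

/* Common tunables, shared by both governors after this patch. */
struct dbs_data {
	int usage_count;
	void *tuners;			/* governor-specific knobs below */
	unsigned int min_sampling_rate;
	unsigned int ignore_nice_load;
	unsigned int sampling_rate;
	unsigned int sampling_down_factor;
	unsigned int up_threshold;
};

/* Only the knobs that remain governor-specific. */
struct od_dbs_tuners {
	unsigned int powersave_bias;
	unsigned int io_is_busy;
};

struct cs_dbs_tuners {
	unsigned int down_threshold;
	unsigned int freq_step;
};

/*
 * Hypothetical stand-in for delay_for_sampling_rate(): either governor can
 * derive its sampling delay straight from dbs_data, with no tuner-struct
 * indirection.
 */
static unsigned int next_delay_us(const struct dbs_data *dbs_data,
				  unsigned int rate_mult)
{
	return dbs_data->sampling_rate * rate_mult;
}

int main(void)
{
	struct od_dbs_tuners od = { .powersave_bias = 0, .io_is_busy = 1 };
	struct dbs_data dbs_data = {
		.tuners = &od,
		.sampling_rate = 10000,	/* 10 ms, example value only */
		.sampling_down_factor = 1,
		.up_threshold = 95,
	};

	printf("next sample in %u us\n", next_delay_us(&dbs_data, 1));
	return 0;
}
```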