author     Linus Torvalds <torvalds@linux-foundation.org>    2011-03-17 20:42:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2011-03-17 20:42:14 -0400
commit     61ef46fd45c3c62dc7c880a45dd2aa841b9af8fb (patch)
tree       2c6b7a4357ba85f7be027bd492da9bf8d7c6acb2 /drivers
parent     77aa56ba09b7416764aec2e3f7b41e023cf30602 (diff)
parent     bdce2595a2f539c6fdedd8f2bd281326b627bba3 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] pcc-cpufreq: remove duplicate statements
  [CPUFREQ] Remove the pm_message_t argument from driver suspend
  [CPUFREQ] Remove unneeded locks
  [CPUFREQ] Remove old, deprecated per cpu ondemand/conservative sysfs files
  [CPUFREQ] Remove deprecated sysfs file sampling_rate_max
  [CPUFREQ] powernow-k8: The table index is not worth displaying
  [CPUFREQ] calculate delay after dbs_check_cpu
  [CPUFREQ] Add documentation for sampling_down_factor
  [CPUFREQ] drivers/cpufreq: Remove unnecessary semicolons
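Of the changes above, the one visible in drivers/cpufreq/cpufreq.c below is the pm_message_t removal: the core now invokes the driver's suspend hook with only the policy argument. As a rough illustration, here is a minimal sketch of a driver-side callback under the new signature; the "example" driver name and the function body are hypothetical, and only the prototype without pm_message_t is taken from the diff below.

static int example_cpufreq_suspend(struct cpufreq_policy *policy)
{
        /* hypothetical: quiesce frequency-scaling hardware for policy->cpu */
        return 0;       /* a non-zero return triggers the printk in cpufreq_suspend() */
}

static struct cpufreq_driver example_cpufreq_driver = {
        .name           = "example",
        .suspend        = example_cpufreq_suspend,      /* now int (*)(struct cpufreq_policy *) */
};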
Diffstat (limited to 'drivers')
-rw-r--r--   drivers/cpufreq/cpufreq.c                  2
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c   123
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c       122
3 files changed, 19 insertions, 228 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5cb4d09919d6..0f17ad8585d7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1371,7 +1371,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
                 goto out;
 
         if (cpufreq_driver->suspend) {
-                ret = cpufreq_driver->suspend(cpu_policy, pmsg);
+                ret = cpufreq_driver->suspend(cpu_policy);
                 if (ret)
                         printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
                                         "step on CPU %u\n", cpu_policy->cpu);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 94284c8473b1..33b56e5c5c14 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -76,8 +76,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 static unsigned int dbs_enable;        /* number of CPUs using this policy */
 
 /*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -116,7 +115,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
         if (wall)
                 *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
-        return (cputime64_t)jiffies_to_usecs(idle_time);;
+        return (cputime64_t)jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -162,21 +161,12 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
 };
 
 /************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
-                                      struct attribute *attr, char *buf)
-{
-        printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
-                        "sysfs file is deprecated - used by: %s\n", current->comm);
-        return sprintf(buf, "%u\n", -1U);
-}
-
 static ssize_t show_sampling_rate_min(struct kobject *kobj,
                                       struct attribute *attr, char *buf)
 {
         return sprintf(buf, "%u\n", min_sampling_rate);
 }
 
-define_one_global_ro(sampling_rate_max);
 define_one_global_ro(sampling_rate_min);
 
 /* cpufreq_conservative Governor Tunables */
@@ -193,33 +183,6 @@ show_one(down_threshold, down_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(freq_step, freq_step);
 
-/*** delete after deprecation time ***/
-#define DEPRECATION_MSG(file_name) \
-        printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
-                "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
-        printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
-                "interface is deprecated - " #file_name "\n"); \
-        return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(sampling_down_factor);
-show_one_old(up_threshold);
-show_one_old(down_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(freq_step);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
 static ssize_t store_sampling_down_factor(struct kobject *a,
                                           struct attribute *b,
                                           const char *buf, size_t count)
@@ -231,10 +194,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
         if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                 return -EINVAL;
 
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.sampling_down_factor = input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -248,10 +208,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
         if (ret != 1)
                 return -EINVAL;
 
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -262,16 +219,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
         int ret;
         ret = sscanf(buf, "%u", &input);
 
-        mutex_lock(&dbs_mutex);
         if (ret != 1 || input > 100 ||
-                        input <= dbs_tuners_ins.down_threshold) {
-                mutex_unlock(&dbs_mutex);
+                        input <= dbs_tuners_ins.down_threshold)
                 return -EINVAL;
-        }
 
         dbs_tuners_ins.up_threshold = input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -282,17 +234,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
         int ret;
         ret = sscanf(buf, "%u", &input);
 
-        mutex_lock(&dbs_mutex);
         /* cannot be lower than 11 otherwise freq will not fall */
         if (ret != 1 || input < 11 || input > 100 ||
-                        input >= dbs_tuners_ins.up_threshold) {
-                mutex_unlock(&dbs_mutex);
+                        input >= dbs_tuners_ins.up_threshold)
                 return -EINVAL;
-        }
 
         dbs_tuners_ins.down_threshold = input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -311,11 +258,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
         if (input > 1)
                 input = 1;
 
-        mutex_lock(&dbs_mutex);
-        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
-                mutex_unlock(&dbs_mutex);
+        if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
                 return count;
-        }
+
         dbs_tuners_ins.ignore_nice = input;
 
         /* we need to re-evaluate prev_cpu_idle */
@@ -327,8 +272,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
                 if (dbs_tuners_ins.ignore_nice)
                         dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
         }
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -347,10 +290,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
 
         /* no need to test here if freq_step is zero as the user might actually
          * want this, they would be crazy though :) */
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.freq_step = input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -362,7 +302,6 @@ define_one_global_rw(ignore_nice_load);
 define_one_global_rw(freq_step);
 
 static struct attribute *dbs_attributes[] = {
-        &sampling_rate_max.attr,
         &sampling_rate_min.attr,
         &sampling_rate.attr,
         &sampling_down_factor.attr,
@@ -378,49 +317,6 @@ static struct attribute_group dbs_attr_group = {
         .name = "conservative",
 };
 
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
-        printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
-                "interface is deprecated - " #file_name "\n"); \
-        return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(sampling_down_factor);
-write_one_old(up_threshold);
-write_one_old(down_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(freq_step);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(sampling_down_factor);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(down_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(freq_step);
-
-static struct attribute *dbs_attributes_old[] = {
-        &sampling_rate_max_old.attr,
-        &sampling_rate_min_old.attr,
-        &sampling_rate_old.attr,
-        &sampling_down_factor_old.attr,
-        &up_threshold_old.attr,
-        &down_threshold_old.attr,
-        &ignore_nice_load_old.attr,
-        &freq_step_old.attr,
-        NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
-        .attrs = dbs_attributes_old,
-        .name = "conservative",
-};
-
-/*** delete after deprecation time ***/
-
 /************************** sysfs end ************************/
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -596,12 +492,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                 mutex_lock(&dbs_mutex);
 
-                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
-                if (rc) {
-                        mutex_unlock(&dbs_mutex);
-                        return rc;
-                }
-
                 for_each_cpu(j, policy->cpus) {
                         struct cpu_dbs_info_s *j_dbs_info;
                         j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
@@ -664,7 +554,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 dbs_timer_exit(this_dbs_info);
 
                 mutex_lock(&dbs_mutex);
-                sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
                 dbs_enable--;
                 mutex_destroy(&this_dbs_info->timer_mutex);
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 58aa85ea5ec6..891360edecdd 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -99,8 +99,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 static unsigned int dbs_enable;        /* number of CPUs using this policy */
 
 /*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -235,21 +234,12 @@ static void ondemand_powersave_bias_init(void)
 
 /************************** sysfs interface ************************/
 
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
-                                      struct attribute *attr, char *buf)
-{
-        printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
-                        "sysfs file is deprecated - used by: %s\n", current->comm);
-        return sprintf(buf, "%u\n", -1U);
-}
-
 static ssize_t show_sampling_rate_min(struct kobject *kobj,
                                       struct attribute *attr, char *buf)
 {
         return sprintf(buf, "%u\n", min_sampling_rate);
 }
 
-define_one_global_ro(sampling_rate_max);
 define_one_global_ro(sampling_rate_min);
 
 /* cpufreq_ondemand Governor Tunables */
@@ -266,32 +256,6 @@ show_one(sampling_down_factor, sampling_down_factor);
 show_one(ignore_nice_load, ignore_nice);
 show_one(powersave_bias, powersave_bias);
 
-/*** delete after deprecation time ***/
-
-#define DEPRECATION_MSG(file_name) \
-        printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
-                "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
-        printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
-                "interface is deprecated - " #file_name "\n"); \
-        return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(up_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(powersave_bias);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
 static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
                                    const char *buf, size_t count)
 {
@@ -300,11 +264,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
         ret = sscanf(buf, "%u", &input);
         if (ret != 1)
                 return -EINVAL;
-
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -317,11 +277,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
         ret = sscanf(buf, "%u", &input);
         if (ret != 1)
                 return -EINVAL;
-
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.io_is_busy = !!input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -336,11 +292,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
                         input < MIN_FREQUENCY_UP_THRESHOLD) {
                 return -EINVAL;
         }
-
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.up_threshold = input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -353,7 +305,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 
         if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                 return -EINVAL;
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.sampling_down_factor = input;
 
         /* Reset down sampling multiplier in case it was active */
@@ -362,8 +313,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
                 dbs_info = &per_cpu(od_cpu_dbs_info, j);
                 dbs_info->rate_mult = 1;
         }
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -382,9 +331,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
         if (input > 1)
                 input = 1;
 
-        mutex_lock(&dbs_mutex);
         if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
-                mutex_unlock(&dbs_mutex);
                 return count;
         }
         dbs_tuners_ins.ignore_nice = input;
@@ -399,8 +346,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
                         dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
 
         }
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -417,11 +362,8 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
         if (input > 1000)
                 input = 1000;
 
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.powersave_bias = input;
         ondemand_powersave_bias_init();
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -433,7 +375,6 @@ define_one_global_rw(ignore_nice_load);
 define_one_global_rw(powersave_bias);
 
 static struct attribute *dbs_attributes[] = {
-        &sampling_rate_max.attr,
         &sampling_rate_min.attr,
         &sampling_rate.attr,
         &up_threshold.attr,
@@ -449,43 +390,6 @@ static struct attribute_group dbs_attr_group = {
         .name = "ondemand",
 };
 
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
-        printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
-                "interface is deprecated - " #file_name "\n"); \
-        return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(up_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(powersave_bias);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(powersave_bias);
-
-static struct attribute *dbs_attributes_old[] = {
-        &sampling_rate_max_old.attr,
-        &sampling_rate_min_old.attr,
-        &sampling_rate_old.attr,
-        &up_threshold_old.attr,
-        &ignore_nice_load_old.attr,
-        &powersave_bias_old.attr,
-        NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
-        .attrs = dbs_attributes_old,
-        .name = "ondemand",
-};
-
-/*** delete after deprecation time ***/
-
 /************************** sysfs end ************************/
 
 static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
@@ -642,12 +546,7 @@ static void do_dbs_timer(struct work_struct *work)
         unsigned int cpu = dbs_info->cpu;
         int sample_type = dbs_info->sample_type;
 
-        /* We want all CPUs to do sampling nearly on same jiffy */
-        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
-                * dbs_info->rate_mult);
-
-        if (num_online_cpus() > 1)
-                delay -= jiffies % delay;
+        int delay;
 
         mutex_lock(&dbs_info->timer_mutex);
 
@@ -660,10 +559,20 @@ static void do_dbs_timer(struct work_struct *work)
                         /* Setup timer for SUB_SAMPLE */
                         dbs_info->sample_type = DBS_SUB_SAMPLE;
                         delay = dbs_info->freq_hi_jiffies;
+                } else {
+                        /* We want all CPUs to do sampling nearly on
+                         * same jiffy
+                         */
+                        delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+                                * dbs_info->rate_mult);
+
+                        if (num_online_cpus() > 1)
+                                delay -= jiffies % delay;
                 }
         } else {
                 __cpufreq_driver_target(dbs_info->cur_policy,
                         dbs_info->freq_lo, CPUFREQ_RELATION_H);
+                delay = dbs_info->freq_lo_jiffies;
         }
         schedule_delayed_work_on(cpu, &dbs_info->work, delay);
         mutex_unlock(&dbs_info->timer_mutex);
@@ -727,12 +636,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                 mutex_lock(&dbs_mutex);
 
-                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
-                if (rc) {
-                        mutex_unlock(&dbs_mutex);
-                        return rc;
-                }
-
                 dbs_enable++;
                 for_each_cpu(j, policy->cpus) {
                         struct cpu_dbs_info_s *j_dbs_info;
@@ -785,7 +688,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 dbs_timer_exit(this_dbs_info);
 
                 mutex_lock(&dbs_mutex);
-                sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
                 mutex_destroy(&this_dbs_info->timer_mutex);
                 dbs_enable--;
                 mutex_unlock(&dbs_mutex);