author     Linus Torvalds <torvalds@linux-foundation.org>  2011-03-17 20:42:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-17 20:42:14 -0400
commit     61ef46fd45c3c62dc7c880a45dd2aa841b9af8fb
tree       2c6b7a4357ba85f7be027bd492da9bf8d7c6acb2
parent     77aa56ba09b7416764aec2e3f7b41e023cf30602
parent     bdce2595a2f539c6fdedd8f2bd281326b627bba3
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
  [CPUFREQ] pcc-cpufreq: remove duplicate statements
  [CPUFREQ] Remove the pm_message_t argument from driver suspend
  [CPUFREQ] Remove unneeded locks
  [CPUFREQ] Remove old, deprecated per cpu ondemand/conservative sysfs files
  [CPUFREQ] Remove deprecated sysfs file sampling_rate_max
  [CPUFREQ] powernow-k8: The table index is not worth displaying
  [CPUFREQ] calculate delay after dbs_check_cpu
  [CPUFREQ] Add documentation for sampling_down_factor
  [CPUFREQ] drivers/cpufreq: Remove unnecessary semicolons
-rw-r--r--  Documentation/cpu-freq/governors.txt          |  11
-rw-r--r--  arch/arm/mach-s5pv210/cpufreq.c               |   3
-rw-r--r--  arch/arm/mach-s5pv310/cpufreq.c               |   3
-rw-r--r--  arch/arm/plat-s3c24xx/cpu-freq.c              |   2
-rw-r--r--  arch/powerpc/platforms/powermac/cpufreq_32.c  |   2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c     |   2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c     |   3
-rw-r--r--  drivers/cpufreq/cpufreq.c                     |   2
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c        | 123
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c            | 122
-rw-r--r--  include/linux/cpufreq.h                       |  11
11 files changed, 36 insertions, 248 deletions
diff --git a/Documentation/cpu-freq/governors.txt b/Documentation/cpu-freq/governors.txt
index 737988fca64d..e74d0a2eb1cf 100644
--- a/Documentation/cpu-freq/governors.txt
+++ b/Documentation/cpu-freq/governors.txt
@@ -158,6 +158,17 @@ intensive calculation on your laptop that you do not care how long it
 takes to complete as you can 'nice' it and prevent it from taking part
 in the deciding process of whether to increase your CPU frequency.
 
+sampling_down_factor: this parameter controls the rate at which the
+kernel makes a decision on when to decrease the frequency while running
+at top speed. When set to 1 (the default) decisions to reevaluate load
+are made at the same interval regardless of current clock speed. But
+when set to greater than 1 (e.g. 100) it acts as a multiplier for the
+scheduling interval for reevaluating load when the CPU is at its top
+speed due to high load. This improves performance by reducing the overhead
+of load evaluation and helping the CPU stay at its top speed when truly
+busy, rather than shifting back and forth in speed. This tunable has no
+effect on behavior at lower speeds/lower CPU loads.
+
 
 2.5 Conservative
 ----------------
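Note: the tunable documented above acts, in effect, as a multiplier on the governor's sampling interval while the CPU sits at its maximum frequency under load; the ondemand changes later in this merge expose the same idea as dbs_info->rate_mult. The helper below is only an illustrative sketch, not code from this patch (the function name and parameters are invented for the example):

/*
 * Illustrative sketch only: how sampling_down_factor stretches the time
 * between load re-evaluations while the CPU is busy at its top speed.
 * In the governor this corresponds to
 * dbs_tuners_ins.sampling_rate * dbs_info->rate_mult.
 */
static unsigned int next_sample_interval_us(unsigned int sampling_rate_us,
                                            unsigned int sampling_down_factor,
                                            int busy_at_max_freq)
{
        /* rate_mult stays 1 at lower speeds/loads, so behaviour there is unchanged */
        unsigned int rate_mult = busy_at_max_freq ? sampling_down_factor : 1;

        return sampling_rate_us * rate_mult;
}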
diff --git a/arch/arm/mach-s5pv210/cpufreq.c b/arch/arm/mach-s5pv210/cpufreq.c
index a6f22920a2c2..22046e2f53c2 100644
--- a/arch/arm/mach-s5pv210/cpufreq.c
+++ b/arch/arm/mach-s5pv210/cpufreq.c
@@ -390,8 +390,7 @@ static int s5pv210_target(struct cpufreq_policy *policy,
 }
 
 #ifdef CONFIG_PM
-static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy,
-                                   pm_message_t pmsg)
+static int s5pv210_cpufreq_suspend(struct cpufreq_policy *policy)
 {
         return 0;
 }
diff --git a/arch/arm/mach-s5pv310/cpufreq.c b/arch/arm/mach-s5pv310/cpufreq.c
index b04cbc731128..7c08ad7d8887 100644
--- a/arch/arm/mach-s5pv310/cpufreq.c
+++ b/arch/arm/mach-s5pv310/cpufreq.c
@@ -458,8 +458,7 @@ static int s5pv310_target(struct cpufreq_policy *policy,
 }
 
 #ifdef CONFIG_PM
-static int s5pv310_cpufreq_suspend(struct cpufreq_policy *policy,
-                                   pm_message_t pmsg)
+static int s5pv310_cpufreq_suspend(struct cpufreq_policy *policy)
 {
         return 0;
 }
diff --git a/arch/arm/plat-s3c24xx/cpu-freq.c b/arch/arm/plat-s3c24xx/cpu-freq.c
index 25a8fc7f512e..eea75ff81d15 100644
--- a/arch/arm/plat-s3c24xx/cpu-freq.c
+++ b/arch/arm/plat-s3c24xx/cpu-freq.c
@@ -433,7 +433,7 @@ static int s3c_cpufreq_verify(struct cpufreq_policy *policy)
 static struct cpufreq_frequency_table suspend_pll;
 static unsigned int suspend_freq;
 
-static int s3c_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
+static int s3c_cpufreq_suspend(struct cpufreq_policy *policy)
 {
         suspend_pll.frequency = clk_get_rate(_clk_mpll);
         suspend_pll.index = __raw_readl(S3C2410_MPLLCON);
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c
index 415ca6d6b273..04af5f48b4eb 100644
--- a/arch/powerpc/platforms/powermac/cpufreq_32.c
+++ b/arch/powerpc/platforms/powermac/cpufreq_32.c
@@ -429,7 +429,7 @@ static u32 read_gpio(struct device_node *np)
         return offset;
 }
 
-static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg)
+static int pmac_cpufreq_suspend(struct cpufreq_policy *policy)
 {
         /* Ok, this could be made a bit smarter, but let's be robust for now. We
          * always force a speed change to high speed before sleep, to make sure
diff --git a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
index 4a5a42b842ad..755a31e0f5b0 100644
--- a/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/pcc-cpufreq.c
@@ -315,8 +315,6 @@ static int __init pcc_cpufreq_do_osc(acpi_handle *handle)
 
         input.count = 4;
         input.pointer = in_params;
-        input.count = 4;
-        input.pointer = in_params;
         in_params[0].type = ACPI_TYPE_BUFFER;
         in_params[0].buffer.length = 16;
         in_params[0].buffer.pointer = OSC_UUID;
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index c567dec854f6..1ae4133e6bd6 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -630,8 +630,7 @@ static void print_basics(struct powernow_k8_data *data)
                         data->powernow_table[j].frequency/1000);
                 } else {
                         printk(KERN_INFO PFX
-                                " %d : fid 0x%x (%d MHz), vid 0x%x\n",
-                                j,
+                                "fid 0x%x (%d MHz), vid 0x%x\n",
                                 data->powernow_table[j].index & 0xff,
                                 data->powernow_table[j].frequency/1000,
                                 data->powernow_table[j].index >> 8);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 5cb4d09919d6..0f17ad8585d7 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1371,7 +1371,7 @@ static int cpufreq_suspend(struct sys_device *sysdev, pm_message_t pmsg)
                 goto out;
 
         if (cpufreq_driver->suspend) {
-                ret = cpufreq_driver->suspend(cpu_policy, pmsg);
+                ret = cpufreq_driver->suspend(cpu_policy);
                 if (ret)
                         printk(KERN_ERR "cpufreq: suspend failed in ->suspend "
                                 "step on CPU %u\n", cpu_policy->cpu);
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 94284c8473b1..33b56e5c5c14 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -76,8 +76,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, cs_cpu_dbs_info);
 static unsigned int dbs_enable; /* number of CPUs using this policy */
 
 /*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -116,7 +115,7 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
         if (wall)
                 *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
-        return (cputime64_t)jiffies_to_usecs(idle_time);;
+        return (cputime64_t)jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
@@ -162,21 +161,12 @@ static struct notifier_block dbs_cpufreq_notifier_block = {
 };
 
 /************************** sysfs interface ************************/
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
-                                      struct attribute *attr, char *buf)
-{
-        printk_once(KERN_INFO "CPUFREQ: conservative sampling_rate_max "
-                    "sysfs file is deprecated - used by: %s\n", current->comm);
-        return sprintf(buf, "%u\n", -1U);
-}
-
 static ssize_t show_sampling_rate_min(struct kobject *kobj,
                                       struct attribute *attr, char *buf)
 {
         return sprintf(buf, "%u\n", min_sampling_rate);
 }
 
-define_one_global_ro(sampling_rate_max);
 define_one_global_ro(sampling_rate_min);
 
 /* cpufreq_conservative Governor Tunables */
@@ -193,33 +183,6 @@ show_one(down_threshold, down_threshold);
 show_one(ignore_nice_load, ignore_nice);
 show_one(freq_step, freq_step);
 
-/*** delete after deprecation time ***/
-#define DEPRECATION_MSG(file_name) \
-        printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
-                "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
-        printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
-                "interface is deprecated - " #file_name "\n"); \
-        return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(sampling_down_factor);
-show_one_old(up_threshold);
-show_one_old(down_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(freq_step);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
 static ssize_t store_sampling_down_factor(struct kobject *a,
                                           struct attribute *b,
                                           const char *buf, size_t count)
@@ -231,10 +194,7 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
         if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                 return -EINVAL;
 
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.sampling_down_factor = input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -248,10 +208,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
         if (ret != 1)
                 return -EINVAL;
 
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -262,16 +219,11 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
         int ret;
         ret = sscanf(buf, "%u", &input);
 
-        mutex_lock(&dbs_mutex);
         if (ret != 1 || input > 100 ||
-                        input <= dbs_tuners_ins.down_threshold) {
-                mutex_unlock(&dbs_mutex);
+                        input <= dbs_tuners_ins.down_threshold)
                 return -EINVAL;
-        }
 
         dbs_tuners_ins.up_threshold = input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -282,17 +234,12 @@ static ssize_t store_down_threshold(struct kobject *a, struct attribute *b,
         int ret;
         ret = sscanf(buf, "%u", &input);
 
-        mutex_lock(&dbs_mutex);
         /* cannot be lower than 11 otherwise freq will not fall */
         if (ret != 1 || input < 11 || input > 100 ||
-                        input >= dbs_tuners_ins.up_threshold) {
-                mutex_unlock(&dbs_mutex);
+                        input >= dbs_tuners_ins.up_threshold)
                 return -EINVAL;
-        }
 
         dbs_tuners_ins.down_threshold = input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -311,11 +258,9 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
         if (input > 1)
                 input = 1;
 
-        mutex_lock(&dbs_mutex);
-        if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
-                mutex_unlock(&dbs_mutex);
+        if (input == dbs_tuners_ins.ignore_nice) /* nothing to do */
                 return count;
-        }
+
         dbs_tuners_ins.ignore_nice = input;
 
         /* we need to re-evaluate prev_cpu_idle */
@@ -327,8 +272,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
                 if (dbs_tuners_ins.ignore_nice)
                         dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
         }
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -347,10 +290,7 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
 
         /* no need to test here if freq_step is zero as the user might actually
          * want this, they would be crazy though :) */
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.freq_step = input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -362,7 +302,6 @@ define_one_global_rw(ignore_nice_load);
 define_one_global_rw(freq_step);
 
 static struct attribute *dbs_attributes[] = {
-        &sampling_rate_max.attr,
         &sampling_rate_min.attr,
         &sampling_rate.attr,
         &sampling_down_factor.attr,
@@ -378,49 +317,6 @@ static struct attribute_group dbs_attr_group = {
         .name = "conservative",
 };
 
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
-        printk_once(KERN_INFO "CPUFREQ: Per core conservative sysfs " \
-                "interface is deprecated - " #file_name "\n"); \
-        return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(sampling_down_factor);
-write_one_old(up_threshold);
-write_one_old(down_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(freq_step);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(sampling_down_factor);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(down_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(freq_step);
-
-static struct attribute *dbs_attributes_old[] = {
-        &sampling_rate_max_old.attr,
-        &sampling_rate_min_old.attr,
-        &sampling_rate_old.attr,
-        &sampling_down_factor_old.attr,
-        &up_threshold_old.attr,
-        &down_threshold_old.attr,
-        &ignore_nice_load_old.attr,
-        &freq_step_old.attr,
-        NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
-        .attrs = dbs_attributes_old,
-        .name = "conservative",
-};
-
-/*** delete after deprecation time ***/
-
 /************************** sysfs end ************************/
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
@@ -596,12 +492,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                 mutex_lock(&dbs_mutex);
 
-                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
-                if (rc) {
-                        mutex_unlock(&dbs_mutex);
-                        return rc;
-                }
-
                 for_each_cpu(j, policy->cpus) {
                         struct cpu_dbs_info_s *j_dbs_info;
                         j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);
@@ -664,7 +554,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 dbs_timer_exit(this_dbs_info);
 
                 mutex_lock(&dbs_mutex);
-                sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
                 dbs_enable--;
                 mutex_destroy(&this_dbs_info->timer_mutex);
 
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 58aa85ea5ec6..891360edecdd 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -99,8 +99,7 @@ static DEFINE_PER_CPU(struct cpu_dbs_info_s, od_cpu_dbs_info);
 static unsigned int dbs_enable; /* number of CPUs using this policy */
 
 /*
- * dbs_mutex protects data in dbs_tuners_ins from concurrent changes on
- * different CPUs. It protects dbs_enable in governor start/stop.
+ * dbs_mutex protects dbs_enable in governor start/stop.
  */
 static DEFINE_MUTEX(dbs_mutex);
 
@@ -235,21 +234,12 @@ static void ondemand_powersave_bias_init(void)
 
 /************************** sysfs interface ************************/
 
-static ssize_t show_sampling_rate_max(struct kobject *kobj,
-                                      struct attribute *attr, char *buf)
-{
-        printk_once(KERN_INFO "CPUFREQ: ondemand sampling_rate_max "
-                    "sysfs file is deprecated - used by: %s\n", current->comm);
-        return sprintf(buf, "%u\n", -1U);
-}
-
 static ssize_t show_sampling_rate_min(struct kobject *kobj,
                                       struct attribute *attr, char *buf)
 {
         return sprintf(buf, "%u\n", min_sampling_rate);
 }
 
-define_one_global_ro(sampling_rate_max);
 define_one_global_ro(sampling_rate_min);
 
 /* cpufreq_ondemand Governor Tunables */
@@ -266,32 +256,6 @@ show_one(sampling_down_factor, sampling_down_factor);
 show_one(ignore_nice_load, ignore_nice);
 show_one(powersave_bias, powersave_bias);
 
-/*** delete after deprecation time ***/
-
-#define DEPRECATION_MSG(file_name) \
-        printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
-                "interface is deprecated - " #file_name "\n");
-
-#define show_one_old(file_name) \
-static ssize_t show_##file_name##_old \
-(struct cpufreq_policy *unused, char *buf) \
-{ \
-        printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
-                "interface is deprecated - " #file_name "\n"); \
-        return show_##file_name(NULL, NULL, buf); \
-}
-show_one_old(sampling_rate);
-show_one_old(up_threshold);
-show_one_old(ignore_nice_load);
-show_one_old(powersave_bias);
-show_one_old(sampling_rate_min);
-show_one_old(sampling_rate_max);
-
-cpufreq_freq_attr_ro_old(sampling_rate_min);
-cpufreq_freq_attr_ro_old(sampling_rate_max);
-
-/*** delete after deprecation time ***/
-
 static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
                                    const char *buf, size_t count)
 {
@@ -300,11 +264,7 @@ static ssize_t store_sampling_rate(struct kobject *a, struct attribute *b,
         ret = sscanf(buf, "%u", &input);
         if (ret != 1)
                 return -EINVAL;
-
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.sampling_rate = max(input, min_sampling_rate);
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -317,11 +277,7 @@ static ssize_t store_io_is_busy(struct kobject *a, struct attribute *b,
         ret = sscanf(buf, "%u", &input);
         if (ret != 1)
                 return -EINVAL;
-
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.io_is_busy = !!input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -336,11 +292,7 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
                         input < MIN_FREQUENCY_UP_THRESHOLD) {
                 return -EINVAL;
         }
-
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.up_threshold = input;
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -353,7 +305,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
 
         if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
                 return -EINVAL;
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.sampling_down_factor = input;
 
         /* Reset down sampling multiplier in case it was active */
@@ -362,8 +313,6 @@ static ssize_t store_sampling_down_factor(struct kobject *a,
                 dbs_info = &per_cpu(od_cpu_dbs_info, j);
                 dbs_info->rate_mult = 1;
         }
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -382,9 +331,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
         if (input > 1)
                 input = 1;
 
-        mutex_lock(&dbs_mutex);
         if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
-                mutex_unlock(&dbs_mutex);
                 return count;
         }
         dbs_tuners_ins.ignore_nice = input;
@@ -399,8 +346,6 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
                         dbs_info->prev_cpu_nice = kstat_cpu(j).cpustat.nice;
 
         }
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -417,11 +362,8 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
         if (input > 1000)
                 input = 1000;
 
-        mutex_lock(&dbs_mutex);
         dbs_tuners_ins.powersave_bias = input;
         ondemand_powersave_bias_init();
-        mutex_unlock(&dbs_mutex);
-
         return count;
 }
 
@@ -433,7 +375,6 @@ define_one_global_rw(ignore_nice_load);
 define_one_global_rw(powersave_bias);
 
 static struct attribute *dbs_attributes[] = {
-        &sampling_rate_max.attr,
         &sampling_rate_min.attr,
         &sampling_rate.attr,
         &up_threshold.attr,
@@ -449,43 +390,6 @@ static struct attribute_group dbs_attr_group = {
         .name = "ondemand",
 };
 
-/*** delete after deprecation time ***/
-
-#define write_one_old(file_name) \
-static ssize_t store_##file_name##_old \
-(struct cpufreq_policy *unused, const char *buf, size_t count) \
-{ \
-        printk_once(KERN_INFO "CPUFREQ: Per core ondemand sysfs " \
-                "interface is deprecated - " #file_name "\n"); \
-        return store_##file_name(NULL, NULL, buf, count); \
-}
-write_one_old(sampling_rate);
-write_one_old(up_threshold);
-write_one_old(ignore_nice_load);
-write_one_old(powersave_bias);
-
-cpufreq_freq_attr_rw_old(sampling_rate);
-cpufreq_freq_attr_rw_old(up_threshold);
-cpufreq_freq_attr_rw_old(ignore_nice_load);
-cpufreq_freq_attr_rw_old(powersave_bias);
-
-static struct attribute *dbs_attributes_old[] = {
-        &sampling_rate_max_old.attr,
-        &sampling_rate_min_old.attr,
-        &sampling_rate_old.attr,
-        &up_threshold_old.attr,
-        &ignore_nice_load_old.attr,
-        &powersave_bias_old.attr,
-        NULL
-};
-
-static struct attribute_group dbs_attr_group_old = {
-        .attrs = dbs_attributes_old,
-        .name = "ondemand",
-};
-
-/*** delete after deprecation time ***/
-
 /************************** sysfs end ************************/
 
 static void dbs_freq_increase(struct cpufreq_policy *p, unsigned int freq)
@@ -642,12 +546,7 @@ static void do_dbs_timer(struct work_struct *work)
         unsigned int cpu = dbs_info->cpu;
         int sample_type = dbs_info->sample_type;
 
-        /* We want all CPUs to do sampling nearly on same jiffy */
-        int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
-                * dbs_info->rate_mult);
-
-        if (num_online_cpus() > 1)
-                delay -= jiffies % delay;
+        int delay;
 
         mutex_lock(&dbs_info->timer_mutex);
 
@@ -660,10 +559,20 @@ static void do_dbs_timer(struct work_struct *work)
                         /* Setup timer for SUB_SAMPLE */
                         dbs_info->sample_type = DBS_SUB_SAMPLE;
                         delay = dbs_info->freq_hi_jiffies;
+                } else {
+                        /* We want all CPUs to do sampling nearly on
+                         * same jiffy
+                         */
+                        delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+                                * dbs_info->rate_mult);
+
+                        if (num_online_cpus() > 1)
+                                delay -= jiffies % delay;
                 }
         } else {
                 __cpufreq_driver_target(dbs_info->cur_policy,
                         dbs_info->freq_lo, CPUFREQ_RELATION_H);
+                delay = dbs_info->freq_lo_jiffies;
         }
         schedule_delayed_work_on(cpu, &dbs_info->work, delay);
         mutex_unlock(&dbs_info->timer_mutex);
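Note: pieced together from the context lines of the hunk above, the reworked do_dbs_timer() now picks the delay only after dbs_check_cpu() has run, instead of computing it up front. The outline below condenses that flow; the surrounding declarations and function body are elided, so treat it as a sketch rather than the literal source:

        /* DBS_NORMAL_SAMPLE: evaluate load first, then choose the next delay */
        if (sample_type == DBS_NORMAL_SAMPLE) {
                dbs_check_cpu(dbs_info);
                if (dbs_info->freq_lo) {
                        /* Setup timer for SUB_SAMPLE */
                        dbs_info->sample_type = DBS_SUB_SAMPLE;
                        delay = dbs_info->freq_hi_jiffies;
                } else {
                        /* align sampling across CPUs, scaled by rate_mult */
                        delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
                                * dbs_info->rate_mult);
                        if (num_online_cpus() > 1)
                                delay -= jiffies % delay;
                }
        } else {
                /* SUB_SAMPLE: drop to freq_lo and wait out its share of the window */
                __cpufreq_driver_target(dbs_info->cur_policy,
                        dbs_info->freq_lo, CPUFREQ_RELATION_H);
                delay = dbs_info->freq_lo_jiffies;
        }
        schedule_delayed_work_on(cpu, &dbs_info->work, delay);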
@@ -727,12 +636,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
                 mutex_lock(&dbs_mutex);
 
-                rc = sysfs_create_group(&policy->kobj, &dbs_attr_group_old);
-                if (rc) {
-                        mutex_unlock(&dbs_mutex);
-                        return rc;
-                }
-
                 dbs_enable++;
                 for_each_cpu(j, policy->cpus) {
                         struct cpu_dbs_info_s *j_dbs_info;
@@ -785,7 +688,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 dbs_timer_exit(this_dbs_info);
 
                 mutex_lock(&dbs_mutex);
-                sysfs_remove_group(&policy->kobj, &dbs_attr_group_old);
                 mutex_destroy(&this_dbs_info->timer_mutex);
                 dbs_enable--;
                 mutex_unlock(&dbs_mutex);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index c3e9de8321c6..9343dd3de858 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -230,7 +230,7 @@ struct cpufreq_driver {
         int (*bios_limit) (int cpu, unsigned int *limit);
 
         int (*exit) (struct cpufreq_policy *policy);
-        int (*suspend) (struct cpufreq_policy *policy, pm_message_t pmsg);
+        int (*suspend) (struct cpufreq_policy *policy);
         int (*resume) (struct cpufreq_policy *policy);
         struct freq_attr **attr;
 };
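Note: with the pm_message_t argument gone, a driver's suspend hook now receives only the policy. A minimal sketch of what a callback and its registration look like after this change; the "example" driver name and body are hypothetical, modeled on the s5pv210 stub updated above:

/* Hypothetical driver stub, shown only to illustrate the new signature. */
static int example_cpufreq_suspend(struct cpufreq_policy *policy)
{
        /* nothing to save for this example driver */
        return 0;
}

static struct cpufreq_driver example_cpufreq_driver = {
        .name           = "example",
        .suspend        = example_cpufreq_suspend,
        /* .init, .verify, .target, .resume and the rest omitted */
};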
@@ -281,19 +281,10 @@ __ATTR(_name, 0444, show_##_name, NULL)
 static struct freq_attr _name = \
 __ATTR(_name, _perm, show_##_name, NULL)
 
-#define cpufreq_freq_attr_ro_old(_name) \
-static struct freq_attr _name##_old = \
-__ATTR(_name, 0444, show_##_name##_old, NULL)
-
 #define cpufreq_freq_attr_rw(_name) \
 static struct freq_attr _name = \
 __ATTR(_name, 0644, show_##_name, store_##_name)
 
-#define cpufreq_freq_attr_rw_old(_name) \
-static struct freq_attr _name##_old = \
-__ATTR(_name, 0644, show_##_name##_old, store_##_name##_old)
-
-
 struct global_attr {
         struct attribute attr;
         ssize_t (*show)(struct kobject *kobj,