author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2008-10-14 20:31:54 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2008-10-14 20:31:54 -0400
commit     6dc6472581f693b5fc95aebedf67b4960fb85cf0 (patch)
tree       06a5a9a08519950575505273eabced331ed51405 /drivers/cpufreq
parent     ee673eaa72d8d185012b1027a05e25aba18c267f (diff)
parent     8acd3a60bcca17c6d89c73cee3ad6057eb83ba1e (diff)
Merge commit 'origin'
Manual fixup of conflicts on:
arch/powerpc/include/asm/dcr-regs.h
drivers/net/ibm_newemac/core.h
Diffstat (limited to 'drivers/cpufreq')
-rw-r--r--   drivers/cpufreq/cpufreq.c               |  30
-rw-r--r--   drivers/cpufreq/cpufreq_conservative.c  |   5
-rw-r--r--   drivers/cpufreq/cpufreq_ondemand.c      | 147
-rw-r--r--   drivers/cpufreq/cpufreq_performance.c   |   4
-rw-r--r--   drivers/cpufreq/cpufreq_powersave.c     |   4
-rw-r--r--   drivers/cpufreq/cpufreq_userspace.c     |   4
6 files changed, 139 insertions, 55 deletions
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8a67f16987db..31d6f535a79d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1467,25 +1467,27 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
                           unsigned int target_freq,
                           unsigned int relation)
 {
-        int ret;
+        int ret = -EINVAL;
 
         policy = cpufreq_cpu_get(policy->cpu);
         if (!policy)
-                return -EINVAL;
+                goto no_policy;
 
         if (unlikely(lock_policy_rwsem_write(policy->cpu)))
-                return -EINVAL;
+                goto fail;
 
         ret = __cpufreq_driver_target(policy, target_freq, relation);
 
         unlock_policy_rwsem_write(policy->cpu);
 
+fail:
         cpufreq_cpu_put(policy);
+no_policy:
         return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
-int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
+int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
 {
         int ret = 0;
 
@@ -1493,8 +1495,8 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
         if (!policy)
                 return -EINVAL;
 
-        if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
-                ret = cpufreq_driver->getavg(policy->cpu);
+        if (cpu_online(cpu) && cpufreq_driver->getavg)
+                ret = cpufreq_driver->getavg(policy, cpu);
 
         cpufreq_cpu_put(policy);
         return ret;
@@ -1717,13 +1719,17 @@ int cpufreq_update_policy(unsigned int cpu)
 {
         struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
         struct cpufreq_policy policy;
-        int ret = 0;
+        int ret;
 
-        if (!data)
-                return -ENODEV;
+        if (!data) {
+                ret = -ENODEV;
+                goto no_policy;
+        }
 
-        if (unlikely(lock_policy_rwsem_write(cpu)))
-                return -EINVAL;
+        if (unlikely(lock_policy_rwsem_write(cpu))) {
+                ret = -EINVAL;
+                goto fail;
+        }
 
         dprintk("updating policy for CPU %u\n", cpu);
         memcpy(&policy, data, sizeof(struct cpufreq_policy));
@@ -1750,7 +1756,9 @@ int cpufreq_update_policy(unsigned int cpu)
 
         unlock_policy_rwsem_write(cpu);
 
+fail:
         cpufreq_cpu_put(data);
+no_policy:
         return ret;
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
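The two cpufreq.c hunks above replace early returns with a single unwind path, so the reference taken by cpufreq_cpu_get() is always released before returning. For readers who want the shape in isolation, here is a minimal, compilable sketch of that goto-unwind pattern; the helper names are invented stand-ins, not the kernel API:

```c
#include <stdio.h>

/* Hypothetical stand-ins for cpufreq_cpu_get()/cpufreq_cpu_put() and the
 * policy rwsem, only so the sketch builds on its own. */
static int get_policy(int cpu)  { return cpu >= 0; }
static void put_policy(int cpu) { (void)cpu; }
static int take_lock(int cpu)   { (void)cpu; return 0; }
static void drop_lock(int cpu)  { (void)cpu; }

static int do_target(int cpu)
{
        int ret = -22;                  /* start from the failure value (-EINVAL) */

        if (!get_policy(cpu))
                goto no_policy;         /* nothing acquired yet: bail straight out */

        if (take_lock(cpu))
                goto fail;              /* reference held: must still drop it */

        ret = 0;                        /* the real work would happen here */
        drop_lock(cpu);

fail:
        put_policy(cpu);                /* single place that releases the reference */
no_policy:
        return ret;
}

int main(void)
{
        printf("do_target(0) = %d\n", do_target(0));
        return 0;
}
```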
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index ac0bbf2d234f..e2657837d954 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -460,6 +460,7 @@ static void do_dbs_timer(struct work_struct *work)
 
 static inline void dbs_timer_init(void)
 {
+        init_timer_deferrable(&dbs_work.timer);
         schedule_delayed_work(&dbs_work,
                         usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
         return;
@@ -575,13 +576,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         return 0;
 }
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_conservative = {
         .name = "conservative",
         .governor = cpufreq_governor_dbs,
         .max_transition_latency = TRANSITION_LATENCY_LIMIT,
         .owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_conservative);
 
 static int __init cpufreq_gov_dbs_init(void)
 {
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 33855cb3cf16..2ab3c12b88af 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -18,13 +18,19 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
  * It helps to keep variable names smaller, simpler
  */
 
+#define DEF_FREQUENCY_DOWN_DIFFERENTIAL (10)
 #define DEF_FREQUENCY_UP_THRESHOLD (80)
+#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL (3)
+#define MICRO_FREQUENCY_UP_THRESHOLD (95)
 #define MIN_FREQUENCY_UP_THRESHOLD (11)
 #define MAX_FREQUENCY_UP_THRESHOLD (100)
 
@@ -57,6 +63,7 @@ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 struct cpu_dbs_info_s {
         cputime64_t prev_cpu_idle;
         cputime64_t prev_cpu_wall;
+        cputime64_t prev_cpu_nice;
         struct cpufreq_policy *cur_policy;
         struct delayed_work work;
         struct cpufreq_frequency_table *freq_table;
@@ -86,21 +93,24 @@ static struct workqueue_struct *kondemand_wq;
 static struct dbs_tuners {
         unsigned int sampling_rate;
         unsigned int up_threshold;
+        unsigned int down_differential;
         unsigned int ignore_nice;
         unsigned int powersave_bias;
 } dbs_tuners_ins = {
         .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+        .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
         .ignore_nice = 0,
         .powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+                                                  cputime64_t *wall)
 {
         cputime64_t idle_time;
-        cputime64_t cur_jiffies;
+        cputime64_t cur_wall_time;
         cputime64_t busy_time;
 
-        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
         busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
                         kstat_cpu(cpu).cpustat.system);
 
@@ -113,7 +123,37 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
                                 kstat_cpu(cpu).cpustat.nice);
         }
 
-        idle_time = cputime64_sub(cur_jiffies, busy_time);
+        idle_time = cputime64_sub(cur_wall_time, busy_time);
+        if (wall)
+                *wall = cur_wall_time;
+
+        return idle_time;
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+        u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+
+        if (idle_time == -1ULL)
+                return get_cpu_idle_time_jiffy(cpu, wall);
+
+        if (dbs_tuners_ins.ignore_nice) {
+                cputime64_t cur_nice;
+                unsigned long cur_nice_jiffies;
+                struct cpu_dbs_info_s *dbs_info;
+
+                dbs_info = &per_cpu(cpu_dbs_info, cpu);
+                cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
+                                         dbs_info->prev_cpu_nice);
+                /*
+                 * Assumption: nice time between sampling periods will be
+                 * less than 2^32 jiffies for 32 bit sys
+                 */
+                cur_nice_jiffies = (unsigned long)
+                                        cputime64_to_jiffies64(cur_nice);
+                dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
+                return idle_time + jiffies_to_usecs(cur_nice_jiffies);
+        }
         return idle_time;
 }
 
@@ -277,8 +317,8 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
         for_each_online_cpu(j) {
                 struct cpu_dbs_info_s *dbs_info;
                 dbs_info = &per_cpu(cpu_dbs_info, j);
-                dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-                dbs_info->prev_cpu_wall = get_jiffies_64();
+                dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+                                                &dbs_info->prev_cpu_wall);
         }
         mutex_unlock(&dbs_mutex);
 
@@ -334,9 +374,7 @@ static struct attribute_group dbs_attr_group = {
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
-        unsigned int idle_ticks, total_ticks;
-        unsigned int load = 0;
-        cputime64_t cur_jiffies;
+        unsigned int max_load_freq;
 
         struct cpufreq_policy *policy;
         unsigned int j;
@@ -346,13 +384,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
         this_dbs_info->freq_lo = 0;
         policy = this_dbs_info->cur_policy;
-        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-        total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
-                        this_dbs_info->prev_cpu_wall);
-        this_dbs_info->prev_cpu_wall = get_jiffies_64();
 
-        if (!total_ticks)
-                return;
         /*
          * Every sampling_rate, we check, if current idle time is less
          * than 20% (default), then we try to increase frequency
@@ -365,27 +397,44 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
          * 5% (default) of current frequency
          */
 
-        /* Get Idle Time */
-        idle_ticks = UINT_MAX;
+        /* Get Absolute Load - in terms of freq */
+        max_load_freq = 0;
+
         for_each_cpu_mask_nr(j, policy->cpus) {
-                cputime64_t total_idle_ticks;
-                unsigned int tmp_idle_ticks;
                 struct cpu_dbs_info_s *j_dbs_info;
+                cputime64_t cur_wall_time, cur_idle_time;
+                unsigned int idle_time, wall_time;
+                unsigned int load, load_freq;
+                int freq_avg;
 
                 j_dbs_info = &per_cpu(cpu_dbs_info, j);
-                total_idle_ticks = get_cpu_idle_time(j);
-                tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+
+                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+                wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+                                j_dbs_info->prev_cpu_wall);
+                j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+                idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                                 j_dbs_info->prev_cpu_idle);
-                j_dbs_info->prev_cpu_idle = total_idle_ticks;
+                j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+                if (unlikely(!wall_time || wall_time < idle_time))
+                        continue;
+
+                load = 100 * (wall_time - idle_time) / wall_time;
+
+                freq_avg = __cpufreq_driver_getavg(policy, j);
+                if (freq_avg <= 0)
+                        freq_avg = policy->cur;
 
-                if (tmp_idle_ticks < idle_ticks)
-                        idle_ticks = tmp_idle_ticks;
+                load_freq = load * freq_avg;
+                if (load_freq > max_load_freq)
+                        max_load_freq = load_freq;
         }
-        if (likely(total_ticks > idle_ticks))
-                load = (100 * (total_ticks - idle_ticks)) / total_ticks;
 
         /* Check for frequency increase */
-        if (load > dbs_tuners_ins.up_threshold) {
+        if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
                 /* if we are already at full speed then break out early */
                 if (!dbs_tuners_ins.powersave_bias) {
                         if (policy->cur == policy->max)
@@ -412,15 +461,13 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
          * can support the current CPU usage without triggering the up
          * policy. To be safe, we focus 10 points under the threshold.
          */
-        if (load < (dbs_tuners_ins.up_threshold - 10)) {
-                unsigned int freq_next, freq_cur;
-
-                freq_cur = __cpufreq_driver_getavg(policy);
-                if (!freq_cur)
-                        freq_cur = policy->cur;
-
-                freq_next = (freq_cur * load) /
-                        (dbs_tuners_ins.up_threshold - 10);
+        if (max_load_freq <
+            (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
+             policy->cur) {
+                unsigned int freq_next;
+                freq_next = max_load_freq /
+                                (dbs_tuners_ins.up_threshold -
+                                 dbs_tuners_ins.down_differential);
 
                 if (!dbs_tuners_ins.powersave_bias) {
                         __cpufreq_driver_target(policy, freq_next,
@@ -526,8 +573,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                 j_dbs_info = &per_cpu(cpu_dbs_info, j);
                 j_dbs_info->cur_policy = policy;
 
-                j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-                j_dbs_info->prev_cpu_wall = get_jiffies_64();
+                j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+                                                &j_dbs_info->prev_cpu_wall);
         }
         this_dbs_info->cpu = cpu;
         /*
@@ -579,22 +626,42 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         return 0;
 }
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
+#endif
 struct cpufreq_governor cpufreq_gov_ondemand = {
         .name = "ondemand",
         .governor = cpufreq_governor_dbs,
         .max_transition_latency = TRANSITION_LATENCY_LIMIT,
         .owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_ondemand);
 
 static int __init cpufreq_gov_dbs_init(void)
 {
+        int err;
+        cputime64_t wall;
+        u64 idle_time;
+        int cpu = get_cpu();
+
+        idle_time = get_cpu_idle_time_us(cpu, &wall);
+        put_cpu();
+        if (idle_time != -1ULL) {
+                /* Idle micro accounting is supported. Use finer thresholds */
+                dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+                dbs_tuners_ins.down_differential =
+                                        MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+        }
+
         kondemand_wq = create_workqueue("kondemand");
         if (!kondemand_wq) {
                 printk(KERN_ERR "Creation of kondemand failed\n");
                 return -EFAULT;
         }
-        return cpufreq_register_governor(&cpufreq_gov_ondemand);
+        err = cpufreq_register_governor(&cpufreq_gov_ondemand);
+        if (err)
+                destroy_workqueue(kondemand_wq);
+
+        return err;
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
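The dbs_check_cpu() hunks above move ondemand away from raw idle-tick percentages: each CPU's load is computed from wall/idle deltas, scaled by the average frequency reported by __cpufreq_driver_getavg(), and the maximum `load * freq_avg` is compared against `up_threshold * policy->cur`. A small standalone sketch of that arithmetic, using invented sample numbers rather than measurements from any real system:

```c
#include <stdio.h>

int main(void)
{
        unsigned int up_threshold = 95;   /* e.g. MICRO_FREQUENCY_UP_THRESHOLD */
        unsigned int cur_freq = 1600000;  /* current frequency, kHz (made up) */
        unsigned int freq_avg = 1500000;  /* what getavg might report (made up) */
        unsigned int wall_time = 10000;   /* us elapsed since the last sample */
        unsigned int idle_time = 800;     /* us of that interval spent idle */

        /* Per-CPU busy percentage over the sampling window. */
        unsigned int load = 100 * (wall_time - idle_time) / wall_time;

        /* Frequency-weighted load, as in the new dbs_check_cpu(). */
        unsigned int load_freq = load * freq_avg;

        /* Compare load*freq_avg against threshold*cur instead of comparing
         * the raw percentage against the threshold alone. */
        if (load_freq > up_threshold * cur_freq)
                printf("load %u%% -> ramp to policy->max\n", load);
        else
                printf("load %u%% -> hold / evaluate the down path\n", load);

        return 0;
}
```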
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index e8e1451ef1c1..7e2e515087f8 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -36,12 +36,14 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
         return 0;
 }
 
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_performance = {
         .name = "performance",
         .governor = cpufreq_governor_performance,
         .owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_performance);
 
 
 static int __init cpufreq_gov_performance_init(void)
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 88d2f44fba48..e6db5faf3eb1 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -35,12 +35,14 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
         return 0;
 }
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_powersave = {
         .name = "powersave",
         .governor = cpufreq_governor_powersave,
         .owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_powersave);
 
 static int __init cpufreq_gov_powersave_init(void)
 {
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 32244aa7cc0c..1442bbada053 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -187,6 +187,9 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 }
 
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_userspace = {
         .name = "userspace",
         .governor = cpufreq_governor_userspace,
@@ -194,7 +197,6 @@ struct cpufreq_governor cpufreq_gov_userspace = {
         .show_setspeed = show_speed,
         .owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_userspace);
 
 static int __init cpufreq_gov_userspace_init(void)
 {
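Each governor file above gains the same `#ifndef CONFIG_..._DEFAULT_GOV_xxx` / `static` / `#endif` prelude (or `#ifdef ..._MODULE` for performance) in place of an EXPORT_SYMBOL, so the governor object is file-local unless the configuration needs it visible to other compilation units. A toy illustration of that conditional-static linkage trick; the config macro here is invented for the example, not a real kernel option:

```c
#include <stdio.h>

/* Define this to mimic a config option that needs external linkage. */
/* #define CONFIG_DEFAULT_GOV_EXAMPLE */

struct governor {
        const char *name;
};

/* Same shape as the kernel change: the definition is static (file-local)
 * unless the config macro asks for it to be globally visible. */
#ifndef CONFIG_DEFAULT_GOV_EXAMPLE
static
#endif
struct governor example_gov = {
        .name = "example",
};

int main(void)
{
        printf("governor: %s\n", example_gov.name);
        return 0;
}
```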