author    Linus Torvalds <torvalds@linux-foundation.org>   2008-10-11 11:49:34 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>   2008-10-11 11:49:34 -0400
commit    098ef215b1e87cff51f983bae4e4e1358b932ec9 (patch)
tree      2f906ac44e65ce463bbdfa7291773c012663b2c8
parent    b922df7383749a1c0b7ea64c50fa839263d3816b (diff)
parent    4f6e6b9f97b0ce98a8d1da65adbaf743bd0486a9 (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq
* 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] Fix BUG: using smp_processor_id() in preemptible code
[CPUFREQ] Don't export governors for default governor
[CPUFREQ][6/6] cpufreq: Add idle microaccounting in ondemand governor
[CPUFREQ][5/6] cpufreq: Changes to get_cpu_idle_time_us(), used by ondemand governor
[CPUFREQ][4/6] cpufreq_ondemand: Parameterize down differential
[CPUFREQ][3/6] cpufreq: get_cpu_idle_time() changes in ondemand for idle-microaccounting
[CPUFREQ][2/6] cpufreq: Change load calculation in ondemand for software coordination
[CPUFREQ][1/6] cpufreq: Add cpu number parameter to __cpufreq_driver_getavg()
[CPUFREQ] use deferrable delayed work init in conservative governor
[CPUFREQ] drivers/cpufreq/cpufreq.c: Adjust error handling code involving cpufreq_cpu_put
[CPUFREQ] add error handling for cpufreq_register_governor() error
[CPUFREQ] acpi-cpufreq: add error handling for cpufreq_register_driver() error
[CPUFREQ] Coding style fixes to arch/x86/kernel/cpu/cpufreq/powernow-k6.c
[CPUFREQ] Coding style fixes to arch/x86/kernel/cpu/cpufreq/elanfreq.c
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |  13
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/elanfreq.c     |  42
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k6.c  |  41
-rw-r--r--  drivers/cpufreq/cpufreq.c                  |  30
-rw-r--r--  drivers/cpufreq/cpufreq_conservative.c     |   5
-rw-r--r--  drivers/cpufreq/cpufreq_ondemand.c         | 147
-rw-r--r--  drivers/cpufreq/cpufreq_performance.c      |   4
-rw-r--r--  drivers/cpufreq/cpufreq_powersave.c        |   4
-rw-r--r--  drivers/cpufreq/cpufreq_userspace.c        |   4
-rw-r--r--  include/linux/cpufreq.h                    |   7
-rw-r--r--  include/linux/tick.h                       |   2
-rw-r--r--  kernel/time/tick-sched.c                   |  11
12 files changed, 204 insertions, 106 deletions
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index dd097b835839..c24c4a487b7c 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -256,7 +256,8 @@ static u32 get_cur_val(const cpumask_t *mask)
  * Only IA32_APERF/IA32_MPERF ratio is architecturally defined and
  * no meaning should be associated with absolute values of these MSRs.
  */
-static unsigned int get_measured_perf(unsigned int cpu)
+static unsigned int get_measured_perf(struct cpufreq_policy *policy,
+                                      unsigned int cpu)
 {
         union {
                 struct {
@@ -326,7 +327,7 @@ static unsigned int get_measured_perf(unsigned int cpu)
 
 #endif
 
-        retval = per_cpu(drv_data, cpu)->max_freq * perf_percent / 100;
+        retval = per_cpu(drv_data, policy->cpu)->max_freq * perf_percent / 100;
 
         put_cpu();
         set_cpus_allowed_ptr(current, &saved_mask);
@@ -785,7 +786,11 @@ static int __init acpi_cpufreq_init(void)
         if (ret)
                 return ret;
 
-        return cpufreq_register_driver(&acpi_cpufreq_driver);
+        ret = cpufreq_register_driver(&acpi_cpufreq_driver);
+        if (ret)
+                free_percpu(acpi_perf_data);
+
+        return ret;
 }
 
 static void __exit acpi_cpufreq_exit(void)
@@ -795,8 +800,6 @@ static void __exit acpi_cpufreq_exit(void)
         cpufreq_unregister_driver(&acpi_cpufreq_driver);
 
         free_percpu(acpi_perf_data);
-
-        return;
 }
 
 module_param(acpi_pstate_strict, uint, 0644);
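Note on the acpi-cpufreq hunks above: the init path now unwinds its per-CPU allocation if cpufreq_register_driver() fails, and the exit path drops the redundant bare `return;`. A minimal sketch of that register/unwind pattern, using hypothetical names (`my_driver`, `my_data` -- the driver ops are omitted), might look like this:

```c
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/cpufreq.h>

static struct cpufreq_driver my_driver;     /* hypothetical driver, ops omitted */
static int *my_data;                        /* hypothetical per-CPU allocation */

static int __init my_driver_init(void)
{
        int ret;

        my_data = alloc_percpu(int);        /* resource acquired before registration */
        if (!my_data)
                return -ENOMEM;

        ret = cpufreq_register_driver(&my_driver);
        if (ret)
                free_percpu(my_data);       /* registration failed: undo the allocation */

        return ret;
}

static void __exit my_driver_exit(void)
{
        cpufreq_unregister_driver(&my_driver);
        free_percpu(my_data);               /* void function needs no trailing return; */
}

module_init(my_driver_init);
module_exit(my_driver_exit);
MODULE_LICENSE("GPL");
```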
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
index e4a4bf870e94..fe613c93b366 100644
--- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
@@ -25,8 +25,8 @@
 #include <linux/cpufreq.h>
 
 #include <asm/msr.h>
-#include <asm/timex.h>
-#include <asm/io.h>
+#include <linux/timex.h>
+#include <linux/io.h>
 
 #define REG_CSCIR 0x22          /* Chip Setup and Control Index Register */
 #define REG_CSCDR 0x23          /* Chip Setup and Control Data Register */
@@ -82,7 +82,7 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
         u8 clockspeed_reg;    /* Clock Speed Register */
 
         local_irq_disable();
-        outb_p(0x80,REG_CSCIR);
+        outb_p(0x80, REG_CSCIR);
         clockspeed_reg = inb_p(REG_CSCDR);
         local_irq_enable();
 
@@ -98,10 +98,10 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
         }
 
         /* 33 MHz is not 32 MHz... */
-        if ((clockspeed_reg & 0xE0)==0xA0)
+        if ((clockspeed_reg & 0xE0) == 0xA0)
                 return 33000;
 
-        return ((1<<((clockspeed_reg & 0xE0) >> 5)) * 1000);
+        return (1<<((clockspeed_reg & 0xE0) >> 5)) * 1000;
 }
 
 
@@ -117,7 +117,7 @@ static unsigned int elanfreq_get_cpu_frequency(unsigned int cpu)
  * There is no return value.
  */
 
-static void elanfreq_set_cpu_state (unsigned int state)
+static void elanfreq_set_cpu_state(unsigned int state)
 {
         struct cpufreq_freqs freqs;
 
@@ -144,20 +144,20 @@ static void elanfreq_set_cpu_state (unsigned int state)
          */
 
         local_irq_disable();
-        outb_p(0x40,REG_CSCIR);         /* Disable hyperspeed mode */
-        outb_p(0x00,REG_CSCDR);
+        outb_p(0x40, REG_CSCIR);        /* Disable hyperspeed mode */
+        outb_p(0x00, REG_CSCDR);
         local_irq_enable();             /* wait till internal pipelines and */
         udelay(1000);                   /* buffers have cleaned up */
 
         local_irq_disable();
 
         /* now, set the CPU clock speed register (0x80) */
-        outb_p(0x80,REG_CSCIR);
-        outb_p(elan_multiplier[state].val80h,REG_CSCDR);
+        outb_p(0x80, REG_CSCIR);
+        outb_p(elan_multiplier[state].val80h, REG_CSCDR);
 
         /* now, the hyperspeed bit in PMU Force Mode Register (0x40) */
-        outb_p(0x40,REG_CSCIR);
-        outb_p(elan_multiplier[state].val40h,REG_CSCDR);
+        outb_p(0x40, REG_CSCIR);
+        outb_p(elan_multiplier[state].val40h, REG_CSCDR);
         udelay(10000);
         local_irq_enable();
 
@@ -173,12 +173,12 @@ static void elanfreq_set_cpu_state (unsigned int state)
  * for the hardware supported by the driver.
  */
 
-static int elanfreq_verify (struct cpufreq_policy *policy)
+static int elanfreq_verify(struct cpufreq_policy *policy)
 {
         return cpufreq_frequency_table_verify(policy, &elanfreq_table[0]);
 }
 
-static int elanfreq_target (struct cpufreq_policy *policy,
+static int elanfreq_target(struct cpufreq_policy *policy,
                             unsigned int target_freq,
                             unsigned int relation)
 {
@@ -205,7 +205,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 
         /* capability check */
         if ((c->x86_vendor != X86_VENDOR_AMD) ||
-            (c->x86 != 4) || (c->x86_model!=10))
+            (c->x86 != 4) || (c->x86_model != 10))
                 return -ENODEV;
 
         /* max freq */
@@ -213,7 +213,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
                 max_freq = elanfreq_get_cpu_frequency(0);
 
         /* table init */
-        for (i=0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
+        for (i = 0; (elanfreq_table[i].frequency != CPUFREQ_TABLE_END); i++) {
                 if (elanfreq_table[i].frequency > max_freq)
                         elanfreq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
         }
@@ -224,7 +224,7 @@ static int elanfreq_cpu_init(struct cpufreq_policy *policy)
 
         result = cpufreq_frequency_table_cpuinfo(policy, elanfreq_table);
         if (result)
-                return (result);
+                return result;
 
         cpufreq_frequency_table_get_attr(elanfreq_table, policy->cpu);
         return 0;
@@ -260,7 +260,7 @@ __setup("elanfreq=", elanfreq_setup);
 #endif
 
 
-static struct freq_attr* elanfreq_attr[] = {
+static struct freq_attr *elanfreq_attr[] = {
         &cpufreq_freq_attr_scaling_available_freqs,
         NULL,
 };
@@ -284,9 +284,9 @@ static int __init elanfreq_init(void)
 
         /* Test if we have the right hardware */
         if ((c->x86_vendor != X86_VENDOR_AMD) ||
-            (c->x86 != 4) || (c->x86_model!=10)) {
+            (c->x86 != 4) || (c->x86_model != 10)) {
                 printk(KERN_INFO "elanfreq: error: no Elan processor found!\n");
                 return -ENODEV;
         }
         return cpufreq_register_driver(&elanfreq_driver);
 }
@@ -298,7 +298,7 @@ static void __exit elanfreq_exit(void)
 }
 
 
-module_param (max_freq, int, 0444);
+module_param(max_freq, int, 0444);
 
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Robert Schwebel <r.schwebel@pengutronix.de>, Sven Geggus <sven@geggus.net>");
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
index eb9b62b0830c..b5ced806a316 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
@@ -15,12 +15,11 @@
 #include <linux/slab.h>
 
 #include <asm/msr.h>
-#include <asm/timex.h>
-#include <asm/io.h>
+#include <linux/timex.h>
+#include <linux/io.h>
 
-
-#define POWERNOW_IOPORT 0xfff0         /* it doesn't matter where, as long
-                                          as it is unused */
+#define POWERNOW_IOPORT 0xfff0          /* it doesn't matter where, as long
+                                           as it is unused */
 
 static unsigned int busfreq;   /* FSB, in 10 kHz */
 static unsigned int max_multiplier;
@@ -53,7 +52,7 @@ static int powernow_k6_get_cpu_multiplier(void)
 
         msrval = POWERNOW_IOPORT + 0x1;
         wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
-        invalue=inl(POWERNOW_IOPORT + 0x8);
+        invalue = inl(POWERNOW_IOPORT + 0x8);
         msrval = POWERNOW_IOPORT + 0x0;
         wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
 
@@ -67,9 +66,9 @@ static int powernow_k6_get_cpu_multiplier(void)
  *
  * Tries to change the PowerNow! multiplier
  */
-static void powernow_k6_set_state (unsigned int best_i)
+static void powernow_k6_set_state(unsigned int best_i)
 {
-        unsigned long outvalue=0, invalue=0;
+        unsigned long outvalue = 0, invalue = 0;
         unsigned long msrval;
         struct cpufreq_freqs freqs;
 
@@ -90,10 +89,10 @@ static void powernow_k6_set_state (unsigned int best_i)
 
         msrval = POWERNOW_IOPORT + 0x1;
         wrmsr(MSR_K6_EPMR, msrval, 0); /* enable the PowerNow port */
-        invalue=inl(POWERNOW_IOPORT + 0x8);
+        invalue = inl(POWERNOW_IOPORT + 0x8);
         invalue = invalue & 0xf;
         outvalue = outvalue | invalue;
-        outl(outvalue ,(POWERNOW_IOPORT + 0x8));
+        outl(outvalue , (POWERNOW_IOPORT + 0x8));
         msrval = POWERNOW_IOPORT + 0x0;
         wrmsr(MSR_K6_EPMR, msrval, 0); /* disable it again */
 
@@ -124,7 +123,7 @@ static int powernow_k6_verify(struct cpufreq_policy *policy)
  *
 * sets a new CPUFreq policy
  */
-static int powernow_k6_target (struct cpufreq_policy *policy,
+static int powernow_k6_target(struct cpufreq_policy *policy,
                                unsigned int target_freq,
                                unsigned int relation)
 {
@@ -152,7 +151,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
         busfreq = cpu_khz / max_multiplier;
 
         /* table init */
-        for (i=0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
+        for (i = 0; (clock_ratio[i].frequency != CPUFREQ_TABLE_END); i++) {
                 if (clock_ratio[i].index > max_multiplier)
                         clock_ratio[i].frequency = CPUFREQ_ENTRY_INVALID;
                 else
@@ -165,7 +164,7 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 
         result = cpufreq_frequency_table_cpuinfo(policy, clock_ratio);
         if (result)
-                return (result);
+                return result;
 
         cpufreq_frequency_table_get_attr(clock_ratio, policy->cpu);
 
@@ -176,8 +175,8 @@ static int powernow_k6_cpu_init(struct cpufreq_policy *policy)
 static int powernow_k6_cpu_exit(struct cpufreq_policy *policy)
 {
         unsigned int i;
-        for (i=0; i<8; i++) {
-                if (i==max_multiplier)
+        for (i = 0; i < 8; i++) {
+                if (i == max_multiplier)
                         powernow_k6_set_state(i);
         }
         cpufreq_frequency_table_put_attr(policy->cpu);
@@ -189,7 +188,7 @@ static unsigned int powernow_k6_get(unsigned int cpu)
         return busfreq * powernow_k6_get_cpu_multiplier();
 }
 
-static struct freq_attr* powernow_k6_attr[] = {
+static struct freq_attr *powernow_k6_attr[] = {
         &cpufreq_freq_attr_scaling_available_freqs,
         NULL,
 };
@@ -227,7 +226,7 @@ static int __init powernow_k6_init(void)
         }
 
         if (cpufreq_register_driver(&powernow_k6_driver)) {
-                release_region (POWERNOW_IOPORT, 16);
+                release_region(POWERNOW_IOPORT, 16);
                 return -EINVAL;
         }
 
@@ -243,13 +242,13 @@ static int __init powernow_k6_init(void)
 static void __exit powernow_k6_exit(void)
 {
         cpufreq_unregister_driver(&powernow_k6_driver);
-        release_region (POWERNOW_IOPORT, 16);
+        release_region(POWERNOW_IOPORT, 16);
 }
 
 
-MODULE_AUTHOR ("Arjan van de Ven <arjanv@redhat.com>, Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>");
-MODULE_DESCRIPTION ("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
-MODULE_LICENSE ("GPL");
+MODULE_AUTHOR("Arjan van de Ven <arjanv@redhat.com>, Dave Jones <davej@codemonkey.org.uk>, Dominik Brodowski <linux@brodo.de>");
+MODULE_DESCRIPTION("PowerNow! driver for AMD K6-2+ / K6-3+ processors.");
+MODULE_LICENSE("GPL");
 
 module_init(powernow_k6_init);
 module_exit(powernow_k6_exit);
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8a67f16987db..31d6f535a79d 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1467,25 +1467,27 @@ int cpufreq_driver_target(struct cpufreq_policy *policy,
                           unsigned int target_freq,
                           unsigned int relation)
 {
-        int ret;
+        int ret = -EINVAL;
 
         policy = cpufreq_cpu_get(policy->cpu);
         if (!policy)
-                return -EINVAL;
+                goto no_policy;
 
         if (unlikely(lock_policy_rwsem_write(policy->cpu)))
-                return -EINVAL;
+                goto fail;
 
         ret = __cpufreq_driver_target(policy, target_freq, relation);
 
         unlock_policy_rwsem_write(policy->cpu);
 
+fail:
         cpufreq_cpu_put(policy);
+no_policy:
         return ret;
 }
 EXPORT_SYMBOL_GPL(cpufreq_driver_target);
 
-int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
+int __cpufreq_driver_getavg(struct cpufreq_policy *policy, unsigned int cpu)
 {
         int ret = 0;
 
@@ -1493,8 +1495,8 @@ int __cpufreq_driver_getavg(struct cpufreq_policy *policy)
         if (!policy)
                 return -EINVAL;
 
-        if (cpu_online(policy->cpu) && cpufreq_driver->getavg)
-                ret = cpufreq_driver->getavg(policy->cpu);
+        if (cpu_online(cpu) && cpufreq_driver->getavg)
+                ret = cpufreq_driver->getavg(policy, cpu);
 
         cpufreq_cpu_put(policy);
         return ret;
@@ -1717,13 +1719,17 @@ int cpufreq_update_policy(unsigned int cpu)
 {
         struct cpufreq_policy *data = cpufreq_cpu_get(cpu);
         struct cpufreq_policy policy;
-        int ret = 0;
+        int ret;
 
-        if (!data)
-                return -ENODEV;
+        if (!data) {
+                ret = -ENODEV;
+                goto no_policy;
+        }
 
-        if (unlikely(lock_policy_rwsem_write(cpu)))
-                return -EINVAL;
+        if (unlikely(lock_policy_rwsem_write(cpu))) {
+                ret = -EINVAL;
+                goto fail;
+        }
 
         dprintk("updating policy for CPU %u\n", cpu);
         memcpy(&policy, data, sizeof(struct cpufreq_policy));
@@ -1750,7 +1756,9 @@ int cpufreq_update_policy(unsigned int cpu)
 
         unlock_policy_rwsem_write(cpu);
 
+fail:
         cpufreq_cpu_put(data);
+no_policy:
         return ret;
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
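Note on the cpufreq.c hunks: both cpufreq_driver_target() and cpufreq_update_policy() switch from early returns to goto labels so that the reference taken with cpufreq_cpu_get() is always dropped via cpufreq_cpu_put(), including on the locking-failure path. A sketch of the same acquire/unwind shape, with hypothetical helpers (`get_handle`, `take_lock` and friends are not kernel APIs):

```c
struct handle;
struct handle *get_handle(struct handle *h);   /* takes a reference, may return NULL */
void put_handle(struct handle *h);             /* drops the reference */
int take_lock(struct handle *h);               /* returns non-zero on failure */
void release_lock(struct handle *h);
int do_work(struct handle *h);

int guarded_update(struct handle *h)
{
        int ret = -1;                  /* pessimistic default, like ret = -EINVAL above */

        h = get_handle(h);
        if (!h)
                goto no_handle;        /* nothing acquired yet: just return */

        if (take_lock(h))
                goto put;              /* reference is held, so fall through to put_handle() */

        ret = do_work(h);
        release_lock(h);

put:
        put_handle(h);                 /* single place where the reference is dropped */
no_handle:
        return ret;
}
```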
diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index ac0bbf2d234f..e2657837d954 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -460,6 +460,7 @@ static void do_dbs_timer(struct work_struct *work)
 
 static inline void dbs_timer_init(void)
 {
+        init_timer_deferrable(&dbs_work.timer);
         schedule_delayed_work(&dbs_work,
                         usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
         return;
@@ -575,13 +576,15 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         return 0;
 }
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_conservative = {
         .name = "conservative",
         .governor = cpufreq_governor_dbs,
         .max_transition_latency = TRANSITION_LATENCY_LIMIT,
         .owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_conservative);
 
 static int __init cpufreq_gov_dbs_init(void)
 {
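Note on the conservative-governor hunk: dbs_timer_init() now marks the delayed work's timer as deferrable before scheduling it, so the periodic sampling no longer forces an otherwise idle CPU to wake up just to run the sampler. A sketch of the same idea with a hypothetical sampler, against the 2.6.27-era workqueue API:

```c
#include <linux/workqueue.h>
#include <linux/timer.h>
#include <linux/jiffies.h>

static void my_sample(struct work_struct *work);
static DECLARE_DELAYED_WORK(my_work, my_sample);   /* hypothetical periodic sampler */

static void my_sample(struct work_struct *work)
{
        /* ... take a measurement ... */
        schedule_delayed_work(&my_work, HZ);        /* re-arm for the next period */
}

static void my_sampler_start(void)
{
        /*
         * Deferrable: if the CPU is idle when the timer expires, the wakeup
         * is postponed until the CPU wakes up for some other reason, which
         * is all a background sampler needs.
         */
        init_timer_deferrable(&my_work.timer);
        schedule_delayed_work(&my_work, HZ);
}
```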
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 33855cb3cf16..2ab3c12b88af 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -18,13 +18,19 @@
 #include <linux/jiffies.h>
 #include <linux/kernel_stat.h>
 #include <linux/mutex.h>
+#include <linux/hrtimer.h>
+#include <linux/tick.h>
+#include <linux/ktime.h>
 
 /*
  * dbs is used in this file as a shortform for demandbased switching
  * It helps to keep variable names smaller, simpler
  */
 
+#define DEF_FREQUENCY_DOWN_DIFFERENTIAL         (10)
 #define DEF_FREQUENCY_UP_THRESHOLD              (80)
+#define MICRO_FREQUENCY_DOWN_DIFFERENTIAL       (3)
+#define MICRO_FREQUENCY_UP_THRESHOLD            (95)
 #define MIN_FREQUENCY_UP_THRESHOLD              (11)
 #define MAX_FREQUENCY_UP_THRESHOLD              (100)
 
@@ -57,6 +63,7 @@ enum {DBS_NORMAL_SAMPLE, DBS_SUB_SAMPLE};
 struct cpu_dbs_info_s {
         cputime64_t prev_cpu_idle;
         cputime64_t prev_cpu_wall;
+        cputime64_t prev_cpu_nice;
         struct cpufreq_policy *cur_policy;
         struct delayed_work work;
         struct cpufreq_frequency_table *freq_table;
@@ -86,21 +93,24 @@ static struct workqueue_struct *kondemand_wq;
 static struct dbs_tuners {
         unsigned int sampling_rate;
         unsigned int up_threshold;
+        unsigned int down_differential;
         unsigned int ignore_nice;
         unsigned int powersave_bias;
 } dbs_tuners_ins = {
         .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+        .down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
         .ignore_nice = 0,
         .powersave_bias = 0,
 };
 
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
+                                                  cputime64_t *wall)
 {
         cputime64_t idle_time;
-        cputime64_t cur_jiffies;
+        cputime64_t cur_wall_time;
         cputime64_t busy_time;
 
-        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+        cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
         busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
                         kstat_cpu(cpu).cpustat.system);
 
@@ -113,7 +123,37 @@ static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
                                 kstat_cpu(cpu).cpustat.nice);
         }
 
-        idle_time = cputime64_sub(cur_jiffies, busy_time);
+        idle_time = cputime64_sub(cur_wall_time, busy_time);
+        if (wall)
+                *wall = cur_wall_time;
+
+        return idle_time;
+}
+
+static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
+{
+        u64 idle_time = get_cpu_idle_time_us(cpu, wall);
+
+        if (idle_time == -1ULL)
+                return get_cpu_idle_time_jiffy(cpu, wall);
+
+        if (dbs_tuners_ins.ignore_nice) {
+                cputime64_t cur_nice;
+                unsigned long cur_nice_jiffies;
+                struct cpu_dbs_info_s *dbs_info;
+
+                dbs_info = &per_cpu(cpu_dbs_info, cpu);
+                cur_nice = cputime64_sub(kstat_cpu(cpu).cpustat.nice,
+                                         dbs_info->prev_cpu_nice);
+                /*
+                 * Assumption: nice time between sampling periods will be
+                 * less than 2^32 jiffies for 32 bit sys
+                 */
+                cur_nice_jiffies = (unsigned long)
+                                cputime64_to_jiffies64(cur_nice);
+                dbs_info->prev_cpu_nice = kstat_cpu(cpu).cpustat.nice;
+                return idle_time + jiffies_to_usecs(cur_nice_jiffies);
+        }
         return idle_time;
 }
 
@@ -277,8 +317,8 @@ static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
         for_each_online_cpu(j) {
                 struct cpu_dbs_info_s *dbs_info;
                 dbs_info = &per_cpu(cpu_dbs_info, j);
-                dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-                dbs_info->prev_cpu_wall = get_jiffies_64();
+                dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+                                                &dbs_info->prev_cpu_wall);
         }
         mutex_unlock(&dbs_mutex);
 
@@ -334,9 +374,7 @@ static struct attribute_group dbs_attr_group = {
 
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
-        unsigned int idle_ticks, total_ticks;
-        unsigned int load = 0;
-        cputime64_t cur_jiffies;
+        unsigned int max_load_freq;
 
         struct cpufreq_policy *policy;
         unsigned int j;
@@ -346,13 +384,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
         this_dbs_info->freq_lo = 0;
         policy = this_dbs_info->cur_policy;
-        cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
-        total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
-                        this_dbs_info->prev_cpu_wall);
-        this_dbs_info->prev_cpu_wall = get_jiffies_64();
 
-        if (!total_ticks)
-                return;
         /*
          * Every sampling_rate, we check, if current idle time is less
          * than 20% (default), then we try to increase frequency
@@ -365,27 +397,44 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
          * 5% (default) of current frequency
          */
 
-        /* Get Idle Time */
-        idle_ticks = UINT_MAX;
+        /* Get Absolute Load - in terms of freq */
+        max_load_freq = 0;
+
         for_each_cpu_mask_nr(j, policy->cpus) {
-                cputime64_t total_idle_ticks;
-                unsigned int tmp_idle_ticks;
                 struct cpu_dbs_info_s *j_dbs_info;
+                cputime64_t cur_wall_time, cur_idle_time;
+                unsigned int idle_time, wall_time;
+                unsigned int load, load_freq;
+                int freq_avg;
 
                 j_dbs_info = &per_cpu(cpu_dbs_info, j);
-                total_idle_ticks = get_cpu_idle_time(j);
-                tmp_idle_ticks = (unsigned int) cputime64_sub(total_idle_ticks,
+
+                cur_idle_time = get_cpu_idle_time(j, &cur_wall_time);
+
+                wall_time = (unsigned int) cputime64_sub(cur_wall_time,
+                                j_dbs_info->prev_cpu_wall);
+                j_dbs_info->prev_cpu_wall = cur_wall_time;
+
+                idle_time = (unsigned int) cputime64_sub(cur_idle_time,
                                 j_dbs_info->prev_cpu_idle);
-                j_dbs_info->prev_cpu_idle = total_idle_ticks;
+                j_dbs_info->prev_cpu_idle = cur_idle_time;
+
+                if (unlikely(!wall_time || wall_time < idle_time))
+                        continue;
+
+                load = 100 * (wall_time - idle_time) / wall_time;
+
+                freq_avg = __cpufreq_driver_getavg(policy, j);
+                if (freq_avg <= 0)
+                        freq_avg = policy->cur;
 
-                if (tmp_idle_ticks < idle_ticks)
-                        idle_ticks = tmp_idle_ticks;
+                load_freq = load * freq_avg;
+                if (load_freq > max_load_freq)
+                        max_load_freq = load_freq;
         }
-        if (likely(total_ticks > idle_ticks))
-                load = (100 * (total_ticks - idle_ticks)) / total_ticks;
 
         /* Check for frequency increase */
-        if (load > dbs_tuners_ins.up_threshold) {
+        if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
                 /* if we are already at full speed then break out early */
                 if (!dbs_tuners_ins.powersave_bias) {
                         if (policy->cur == policy->max)
@@ -412,15 +461,13 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
          * can support the current CPU usage without triggering the up
          * policy. To be safe, we focus 10 points under the threshold.
          */
-        if (load < (dbs_tuners_ins.up_threshold - 10)) {
-                unsigned int freq_next, freq_cur;
-
-                freq_cur = __cpufreq_driver_getavg(policy);
-                if (!freq_cur)
-                        freq_cur = policy->cur;
-
-                freq_next = (freq_cur * load) /
-                                (dbs_tuners_ins.up_threshold - 10);
+        if (max_load_freq <
+            (dbs_tuners_ins.up_threshold - dbs_tuners_ins.down_differential) *
+             policy->cur) {
+                unsigned int freq_next;
+                freq_next = max_load_freq /
+                                (dbs_tuners_ins.up_threshold -
+                                 dbs_tuners_ins.down_differential);
 
                 if (!dbs_tuners_ins.powersave_bias) {
                         __cpufreq_driver_target(policy, freq_next,
@@ -526,8 +573,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
                         j_dbs_info = &per_cpu(cpu_dbs_info, j);
                         j_dbs_info->cur_policy = policy;
 
-                        j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j);
-                        j_dbs_info->prev_cpu_wall = get_jiffies_64();
+                        j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
+                                                &j_dbs_info->prev_cpu_wall);
                 }
                 this_dbs_info->cpu = cpu;
                 /*
@@ -579,22 +626,42 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
         return 0;
 }
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND
+static
+#endif
 struct cpufreq_governor cpufreq_gov_ondemand = {
         .name = "ondemand",
         .governor = cpufreq_governor_dbs,
         .max_transition_latency = TRANSITION_LATENCY_LIMIT,
         .owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_ondemand);
 
 static int __init cpufreq_gov_dbs_init(void)
 {
+        int err;
+        cputime64_t wall;
+        u64 idle_time;
+        int cpu = get_cpu();
+
+        idle_time = get_cpu_idle_time_us(cpu, &wall);
+        put_cpu();
+        if (idle_time != -1ULL) {
+                /* Idle micro accounting is supported. Use finer thresholds */
+                dbs_tuners_ins.up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+                dbs_tuners_ins.down_differential =
+                                        MICRO_FREQUENCY_DOWN_DIFFERENTIAL;
+        }
+
        kondemand_wq = create_workqueue("kondemand");
         if (!kondemand_wq) {
                 printk(KERN_ERR "Creation of kondemand failed\n");
                 return -EFAULT;
         }
-        return cpufreq_register_governor(&cpufreq_gov_ondemand);
+        err = cpufreq_register_governor(&cpufreq_gov_ondemand);
+        if (err)
+                destroy_workqueue(kondemand_wq);
+
+        return err;
 }
 
 static void __exit cpufreq_gov_dbs_exit(void)
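Note on the ondemand hunks: load is no longer a bare percentage compared against up_threshold; each CPU's load is scaled by its measured average frequency (load_freq = load * freq_avg) and compared against threshold * policy->cur, and cpufreq_gov_dbs_init() tightens the thresholds to 95/3 when microsecond idle accounting is available. A small worked example of both comparisons, using made-up numbers (none of these values come from the commit):

```c
#include <stdio.h>

/* Toy numbers only -- they do not come from the commit. */
int main(void)
{
        unsigned int wall_time = 10000;        /* us of wall time since the last sample */
        unsigned int idle_time = 2000;         /* us spent idle in the same window      */
        unsigned int freq_avg  = 1600000;      /* kHz, average frequency actually run   */
        unsigned int cur       = 2000000;      /* kHz, currently requested frequency    */
        unsigned int up_threshold = 95;        /* MICRO_FREQUENCY_UP_THRESHOLD          */
        unsigned int down_differential = 3;    /* MICRO_FREQUENCY_DOWN_DIFFERENTIAL     */

        unsigned int load = 100 * (wall_time - idle_time) / wall_time;   /* 80 */
        unsigned int load_freq = load * freq_avg;               /* 128,000,000 */

        if (load_freq > up_threshold * cur) {                   /* 128M > 190M? no  */
                printf("increase to policy->max\n");
        } else if (load_freq <
                   (up_threshold - down_differential) * cur) {  /* 128M < 184M? yes */
                unsigned int freq_next =
                        load_freq / (up_threshold - down_differential);
                printf("decrease, freq_next = %u kHz\n", freq_next);  /* 1391304 */
        }
        return 0;
}
```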
diff --git a/drivers/cpufreq/cpufreq_performance.c b/drivers/cpufreq/cpufreq_performance.c
index e8e1451ef1c1..7e2e515087f8 100644
--- a/drivers/cpufreq/cpufreq_performance.c
+++ b/drivers/cpufreq/cpufreq_performance.c
@@ -36,12 +36,14 @@ static int cpufreq_governor_performance(struct cpufreq_policy *policy,
         return 0;
 }
 
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE_MODULE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_performance = {
         .name = "performance",
         .governor = cpufreq_governor_performance,
         .owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_performance);
 
 
 static int __init cpufreq_gov_performance_init(void)
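Note on the governor hunks (conservative, ondemand, performance, powersave, userspace): the unconditional EXPORT_SYMBOL() is dropped because the only external user of these symbols is the cpufreq core itself, for the default (or fallback) governor; in every other configuration the structure can simply be static. The consumer side lives in cpufreq.h and is paraphrased below -- it is not part of this diff, only a sketch of why external linkage is needed just for the selected default:

```c
/* Paraphrased sketch of the default-governor selection in cpufreq.h;
 * the exact text is an assumption, not shown in this diff. */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
extern struct cpufreq_governor cpufreq_gov_performance;
#define CPUFREQ_DEFAULT_GOVERNOR        (&cpufreq_gov_performance)
#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
extern struct cpufreq_governor cpufreq_gov_ondemand;
#define CPUFREQ_DEFAULT_GOVERNOR        (&cpufreq_gov_ondemand)
#endif
/* Only the governor selected here needs to be visible outside its own
 * file; all the others are now declared static, as in the hunks above. */
```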
diff --git a/drivers/cpufreq/cpufreq_powersave.c b/drivers/cpufreq/cpufreq_powersave.c
index 88d2f44fba48..e6db5faf3eb1 100644
--- a/drivers/cpufreq/cpufreq_powersave.c
+++ b/drivers/cpufreq/cpufreq_powersave.c
@@ -35,12 +35,14 @@ static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
         return 0;
 }
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_powersave = {
         .name = "powersave",
         .governor = cpufreq_governor_powersave,
         .owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_powersave);
 
 static int __init cpufreq_gov_powersave_init(void)
 {
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index 32244aa7cc0c..1442bbada053 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -187,6 +187,9 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 }
 
 
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE
+static
+#endif
 struct cpufreq_governor cpufreq_gov_userspace = {
         .name = "userspace",
         .governor = cpufreq_governor_userspace,
@@ -194,7 +197,6 @@ struct cpufreq_governor cpufreq_gov_userspace = {
         .show_setspeed = show_speed,
         .owner = THIS_MODULE,
 };
-EXPORT_SYMBOL(cpufreq_gov_userspace);
 
 static int __init cpufreq_gov_userspace_init(void)
 {
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 6fd5668aa572..1ee608fd7b77 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -187,7 +187,8 @@ extern int __cpufreq_driver_target(struct cpufreq_policy *policy,
                                    unsigned int relation);
 
 
-extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy);
+extern int __cpufreq_driver_getavg(struct cpufreq_policy *policy,
+                                   unsigned int cpu);
 
 int cpufreq_register_governor(struct cpufreq_governor *governor);
 void cpufreq_unregister_governor(struct cpufreq_governor *governor);
@@ -226,7 +227,9 @@ struct cpufreq_driver {
         unsigned int (*get)     (unsigned int cpu);
 
         /* optional */
-        unsigned int (*getavg)  (unsigned int cpu);
+        unsigned int (*getavg)  (struct cpufreq_policy *policy,
+                                 unsigned int cpu);
+
         int (*exit)             (struct cpufreq_policy *policy);
         int (*suspend)          (struct cpufreq_policy *policy, pm_message_t pmsg);
         int (*resume)           (struct cpufreq_policy *policy);
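Note on the cpufreq.h hunks: the optional getavg callback now takes the policy in addition to the CPU being sampled, matching the acpi-cpufreq change at the top of this diff. A skeleton of a driver wiring it up under the new signature; `measure_perf_percent()` is a hypothetical stand-in for the real measurement code:

```c
#include <linux/cpufreq.h>

static unsigned int measure_perf_percent(unsigned int cpu);  /* hypothetical */

static unsigned int my_getavg(struct cpufreq_policy *policy, unsigned int cpu)
{
        /*
         * "cpu" is the CPU whose utilisation the governor is sampling (it
         * passes each CPU in the policy in turn); per-policy data is reached
         * through "policy" -- compare per_cpu(drv_data, policy->cpu) in the
         * acpi-cpufreq hunk above.
         */
        return policy->cpuinfo.max_freq * measure_perf_percent(cpu) / 100;
}

static struct cpufreq_driver my_driver = {
        .name   = "mydrv",
        .getavg = my_getavg,
        /* .init, .verify, .target, .get and the rest omitted */
};
```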
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 8cf8cfe2cc97..98921a3e1aa8 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -126,7 +126,7 @@ static inline ktime_t tick_nohz_get_sleep_length(void)
         return len;
 }
 static inline void tick_nohz_stop_idle(int cpu) { }
-static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return 0; }
+static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
 # endif /* !NO_HZ */
 
 #endif
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cb02324bdb88..a4d219398167 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/tick.h>
+#include <linux/module.h>
 
 #include <asm/irq_regs.h>
 
@@ -190,9 +191,17 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
 {
         struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
 
-        *last_update_time = ktime_to_us(ts->idle_lastupdate);
+        if (!tick_nohz_enabled)
+                return -1;
+
+        if (ts->idle_active)
+                *last_update_time = ktime_to_us(ts->idle_lastupdate);
+        else
+                *last_update_time = ktime_to_us(ktime_get());
+
         return ktime_to_us(ts->idle_sleeptime);
 }
+EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
 
 /**
  * tick_nohz_stop_sched_tick - stop the idle tick from the idle task