author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-12 16:42:43 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-07-12 16:42:43 -0400
commit		702ed6ef375c19d65f2eeeefd3851476f2c4cee4
tree		fe46588dcc716f64a04310797d9446573614d3fc
parent		2f41fc806434f8466bb361570589a3f6099ca65d
parent		58a7295bc8073b9e668c329cb9ceb5b668c2b15d
Merge master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq
* master.kernel.org:/pub/scm/linux/kernel/git/davej/cpufreq:
[CPUFREQ] Fix sysfs_create_file return value handling
[CPUFREQ] ondemand: fix tickless accounting and software coordination bug
[CPUFREQ] ondemand: add a check to avoid negative load calculation
[CPUFREQ] Keep userspace governor quiet when it is not being used
[CPUFREQ] Longhaul - Proper register access
[CPUFREQ] Kconfig powernow-k8 driver should depend on ACPI P-States driver
[CPUFREQ] Longhaul - Replace ACPI functions with direct I/O
[CPUFREQ] Longhaul - Remove duplicate multipliers
[CPUFREQ] Longhaul - Embedded "conservative"
[CPUFREQ] acpi-cpufreq: Proper ReadModifyWrite of PERF_CTL MSR
[CPUFREQ] check return value of sysfs_create_file
[CPUFREQ] Longhaul - Check ACPI "BM DMA in progress" bit
[CPUFREQ] Longhaul - Move old_ratio to correct place
[CPUFREQ] Longhaul - VT8237 support
[CPUFREQ] Longhaul - Use all kinds of support
[CPUFREQ] powernow-k8: clarify number of cores.
-rw-r--r--	arch/i386/kernel/cpu/cpufreq/Kconfig		 13
-rw-r--r--	arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c	 13
-rw-r--r--	arch/i386/kernel/cpu/cpufreq/longhaul.c		209
-rw-r--r--	arch/i386/kernel/cpu/cpufreq/longhaul.h		 12
-rw-r--r--	arch/i386/kernel/cpu/cpufreq/powernow-k8.c	  5
-rw-r--r--	drivers/cpufreq/cpufreq.c			 18
-rw-r--r--	drivers/cpufreq/cpufreq_ondemand.c		 30
-rw-r--r--	drivers/cpufreq/cpufreq_userspace.c		 23
8 files changed, 216 insertions(+), 107 deletions(-)
diff --git a/arch/i386/kernel/cpu/cpufreq/Kconfig b/arch/i386/kernel/cpu/cpufreq/Kconfig
index e912aae9473c..e77754ca94b4 100644
--- a/arch/i386/kernel/cpu/cpufreq/Kconfig
+++ b/arch/i386/kernel/cpu/cpufreq/Kconfig
@@ -90,10 +90,17 @@ config X86_POWERNOW_K8
 	  If in doubt, say N.
 
 config X86_POWERNOW_K8_ACPI
-	bool
-	depends on X86_POWERNOW_K8 && ACPI_PROCESSOR
-	depends on !(X86_POWERNOW_K8 = y && ACPI_PROCESSOR = m)
+	bool "ACPI Support"
+	select ACPI_PROCESSOR
+	depends on X86_POWERNOW_K8
 	default y
+	help
+	  This provides access to the K8s Processor Performance States via ACPI.
+	  This driver is probably required for CPUFreq to work with multi-socket and
+	  SMP systems. It is not required on at least some single-socket yet
+	  multi-core systems, even if SMP is enabled.
+
+	  It is safe to say Y here.
 
 config X86_GX_SUSPMOD
 	tristate "Cyrix MediaGX/NatSemi Geode Suspend Modulation"
diff --git a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
index 10baa3501ed3..18c8b67ea3a7 100644
--- a/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -167,11 +167,13 @@ static void do_drv_read(struct drv_cmd *cmd)
 
 static void do_drv_write(struct drv_cmd *cmd)
 {
-	u32 h = 0;
+	u32 lo, hi;
 
 	switch (cmd->type) {
 	case SYSTEM_INTEL_MSR_CAPABLE:
-		wrmsr(cmd->addr.msr.reg, cmd->val, h);
+		rdmsr(cmd->addr.msr.reg, lo, hi);
+		lo = (lo & ~INTEL_MSR_RANGE) | (cmd->val & INTEL_MSR_RANGE);
+		wrmsr(cmd->addr.msr.reg, lo, hi);
 		break;
 	case SYSTEM_IO_CAPABLE:
 		acpi_os_write_port((acpi_io_address)cmd->addr.io.port,
@@ -372,7 +374,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	struct cpufreq_freqs freqs;
 	cpumask_t online_policy_cpus;
 	struct drv_cmd cmd;
-	unsigned int msr;
 	unsigned int next_state = 0; /* Index into freq_table */
 	unsigned int next_perf_state = 0; /* Index into perf table */
 	unsigned int i;
@@ -417,11 +418,7 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	case SYSTEM_INTEL_MSR_CAPABLE:
 		cmd.type = SYSTEM_INTEL_MSR_CAPABLE;
 		cmd.addr.msr.reg = MSR_IA32_PERF_CTL;
-		msr =
-		    (u32) perf->states[next_perf_state].
-		    control & INTEL_MSR_RANGE;
-		cmd.val = get_cur_val(online_policy_cpus);
-		cmd.val = (cmd.val & ~INTEL_MSR_RANGE) | msr;
+		cmd.val = (u32) perf->states[next_perf_state].control;
 		break;
 	case SYSTEM_IO_CAPABLE:
 		cmd.type = SYSTEM_IO_CAPABLE;
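[Editor's note: the acpi-cpufreq hunks above replace a blind wrmsr() with a read-modify-write so that only the frequency-control field of PERF_CTL is updated and the remaining bits are preserved. A minimal user-space sketch of that pattern follows; the mask and the fake "register" value are illustrative stand-ins, not the driver's actual definitions.]

#include <stdint.h>
#include <stdio.h>

#define FREQ_FIELD_MASK 0xffffU		/* stands in for INTEL_MSR_RANGE */

static uint32_t fake_perf_ctl = 0xdead0b31;	/* pretend low word of the MSR */

static void write_perf_ctl(uint32_t new_field)
{
	uint32_t lo = fake_perf_ctl;				/* "rdmsr" */
	lo = (lo & ~FREQ_FIELD_MASK) | (new_field & FREQ_FIELD_MASK);
	fake_perf_ctl = lo;					/* "wrmsr" */
}

int main(void)
{
	write_perf_ctl(0x0a24);
	/* Upper bits survive, only the low field changed: 0xdead0a24 */
	printf("PERF_CTL low word is now 0x%08x\n", (unsigned int)fake_perf_ctl);
	return 0;
}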
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.c b/arch/i386/kernel/cpu/cpufreq/longhaul.c
index a3df9c039bd4..8eca59d4c8f4 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.c
@@ -29,6 +29,7 @@
 #include <linux/pci.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/delay.h>
 
 #include <asm/msr.h>
 #include <asm/timex.h>
@@ -55,7 +56,6 @@
 /* Flags */
 #define USE_ACPI_C3		(1 << 1)
 #define USE_NORTHBRIDGE		(1 << 2)
-#define USE_VT8235		(1 << 3)
 
 static int cpu_model;
 static unsigned int numscales=16;
@@ -63,19 +63,15 @@ static unsigned int fsb;
 
 static const struct mV_pos *vrm_mV_table;
 static const unsigned char *mV_vrm_table;
-struct f_msr {
-	u8 vrm;
-	u8 pos;
-};
-static struct f_msr f_msr_table[32];
 
 static unsigned int highest_speed, lowest_speed; /* kHz */
 static unsigned int minmult, maxmult;
 static int can_scale_voltage;
 static struct acpi_processor *pr = NULL;
 static struct acpi_processor_cx *cx = NULL;
+static u32 acpi_regs_addr;
 static u8 longhaul_flags;
-static u8 longhaul_pos;
+static unsigned int longhaul_index;
 
 /* Module parameters */
 static int scale_voltage;
@@ -144,7 +140,7 @@ static void do_longhaul1(unsigned int clock_ratio_index)
 	rdmsrl(MSR_VIA_BCR2, bcr2.val);
 	/* Enable software clock multiplier */
 	bcr2.bits.ESOFTBF = 1;
-	bcr2.bits.CLOCKMUL = clock_ratio_index;
+	bcr2.bits.CLOCKMUL = clock_ratio_index & 0xff;
 
 	/* Sync to timer tick */
 	safe_halt();
@@ -163,14 +159,12 @@ static void do_longhaul1(unsigned int clock_ratio_index)
 
 /* For processor with Longhaul MSR */
 
-static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
+static void do_powersaver(int cx_address, unsigned int clock_ratio_index,
+			  unsigned int dir)
 {
 	union msr_longhaul longhaul;
-	u8 dest_pos;
 	u32 t;
 
-	dest_pos = f_msr_table[clock_ratio_index].pos;
-
 	rdmsrl(MSR_VIA_LONGHAUL, longhaul.val);
 	/* Setup new frequency */
 	longhaul.bits.RevisionKey = longhaul.bits.RevisionID;
@@ -178,11 +172,11 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
 	longhaul.bits.SoftBusRatio4 = (clock_ratio_index & 0x10) >> 4;
 	/* Setup new voltage */
 	if (can_scale_voltage)
-		longhaul.bits.SoftVID = f_msr_table[clock_ratio_index].vrm;
+		longhaul.bits.SoftVID = (clock_ratio_index >> 8) & 0x1f;
 	/* Sync to timer tick */
 	safe_halt();
 	/* Raise voltage if necessary */
-	if (can_scale_voltage && longhaul_pos < dest_pos) {
+	if (can_scale_voltage && dir) {
 		longhaul.bits.EnableSoftVID = 1;
 		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
 		/* Change voltage */
@@ -199,7 +193,6 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
 		}
 		longhaul.bits.EnableSoftVID = 0;
 		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
-		longhaul_pos = dest_pos;
 	}
 
 	/* Change frequency on next halt or sleep */
@@ -220,7 +213,7 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
 	wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
 
 	/* Reduce voltage if necessary */
-	if (can_scale_voltage && longhaul_pos > dest_pos) {
+	if (can_scale_voltage && !dir) {
 		longhaul.bits.EnableSoftVID = 1;
 		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
 		/* Change voltage */
@@ -237,7 +230,6 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
 		}
 		longhaul.bits.EnableSoftVID = 0;
 		wrmsrl(MSR_VIA_LONGHAUL, longhaul.val);
-		longhaul_pos = dest_pos;
 	}
 }
 
@@ -248,25 +240,28 @@ static void do_powersaver(int cx_address, unsigned int clock_ratio_index)
  * Sets a new clock ratio.
  */
 
-static void longhaul_setstate(unsigned int clock_ratio_index)
+static void longhaul_setstate(unsigned int table_index)
 {
+	unsigned int clock_ratio_index;
 	int speed, mult;
 	struct cpufreq_freqs freqs;
-	static unsigned int old_ratio=-1;
 	unsigned long flags;
 	unsigned int pic1_mask, pic2_mask;
+	u16 bm_status = 0;
+	u32 bm_timeout = 1000;
+	unsigned int dir = 0;
 
-	if (old_ratio == clock_ratio_index)
-		return;
-	old_ratio = clock_ratio_index;
-
-	mult = clock_ratio[clock_ratio_index];
+	clock_ratio_index = longhaul_table[table_index].index;
+	/* Safety precautions */
+	mult = clock_ratio[clock_ratio_index & 0x1f];
 	if (mult == -1)
 		return;
-
 	speed = calc_speed(mult);
 	if ((speed > highest_speed) || (speed < lowest_speed))
 		return;
+	/* Voltage transition before frequency transition? */
+	if (can_scale_voltage && longhaul_index < table_index)
+		dir = 1;
 
 	freqs.old = calc_speed(longhaul_get_cpu_mult());
 	freqs.new = speed;
@@ -285,11 +280,24 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 	outb(0xFF,0xA1);	/* Overkill */
 	outb(0xFE,0x21);	/* TMR0 only */
 
+	/* Wait while PCI bus is busy. */
+	if (acpi_regs_addr && (longhaul_flags & USE_NORTHBRIDGE
+	    || ((pr != NULL) && pr->flags.bm_control))) {
+		bm_status = inw(acpi_regs_addr);
+		bm_status &= 1 << 4;
+		while (bm_status && bm_timeout) {
+			outw(1 << 4, acpi_regs_addr);
+			bm_timeout--;
+			bm_status = inw(acpi_regs_addr);
+			bm_status &= 1 << 4;
+		}
+	}
+
 	if (longhaul_flags & USE_NORTHBRIDGE) {
 		/* Disable AGP and PCI arbiters */
 		outb(3, 0x22);
 	} else if ((pr != NULL) && pr->flags.bm_control) {
 		/* Disable bus master arbitration */
 		acpi_set_register(ACPI_BITREG_ARB_DISABLE, 1);
 	}
 	switch (longhaul_version) {
@@ -314,9 +322,9 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 		if (longhaul_flags & USE_ACPI_C3) {
 			/* Don't allow wakeup */
 			acpi_set_register(ACPI_BITREG_BUS_MASTER_RLD, 0);
-			do_powersaver(cx->address, clock_ratio_index);
+			do_powersaver(cx->address, clock_ratio_index, dir);
 		} else {
-			do_powersaver(0, clock_ratio_index);
+			do_powersaver(0, clock_ratio_index, dir);
 		}
 		break;
 	}
@@ -336,6 +344,9 @@ static void longhaul_setstate(unsigned int clock_ratio_index)
 
 	freqs.new = calc_speed(longhaul_get_cpu_mult());
 	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
+
+	if (!bm_timeout)
+		printk(KERN_INFO PFX "Warning: Timeout while waiting for idle PCI bus.\n");
 }
 
 /*
@@ -369,7 +380,8 @@ static int guess_fsb(int mult)
 
 static int __init longhaul_get_ranges(void)
 {
-	unsigned int j, k = 0;
+	unsigned int i, j, k = 0;
+	unsigned int ratio;
 	int mult;
 
 	/* Get current frequency */
@@ -423,8 +435,7 @@ static int __init longhaul_get_ranges(void)
 	if(!longhaul_table)
 		return -ENOMEM;
 
-	for (j=0; j < numscales; j++) {
-		unsigned int ratio;
+	for (j = 0; j < numscales; j++) {
 		ratio = clock_ratio[j];
 		if (ratio == -1)
 			continue;
@@ -434,13 +445,41 @@ static int __init longhaul_get_ranges(void)
 		longhaul_table[k].index = j;
 		k++;
 	}
+	if (k <= 1) {
+		kfree(longhaul_table);
+		return -ENODEV;
+	}
+	/* Sort */
+	for (j = 0; j < k - 1; j++) {
+		unsigned int min_f, min_i;
+		min_f = longhaul_table[j].frequency;
+		min_i = j;
+		for (i = j + 1; i < k; i++) {
+			if (longhaul_table[i].frequency < min_f) {
+				min_f = longhaul_table[i].frequency;
+				min_i = i;
+			}
+		}
+		if (min_i != j) {
+			unsigned int temp;
+			temp = longhaul_table[j].frequency;
+			longhaul_table[j].frequency = longhaul_table[min_i].frequency;
+			longhaul_table[min_i].frequency = temp;
+			temp = longhaul_table[j].index;
+			longhaul_table[j].index = longhaul_table[min_i].index;
+			longhaul_table[min_i].index = temp;
+		}
+	}
 
 	longhaul_table[k].frequency = CPUFREQ_TABLE_END;
-	if (!k) {
-		kfree (longhaul_table);
-		return -EINVAL;
-	}
 
+	/* Find index we are running on */
+	for (j = 0; j < k; j++) {
+		if (clock_ratio[longhaul_table[j].index & 0x1f] == mult) {
+			longhaul_index = j;
+			break;
+		}
+	}
 	return 0;
 }
 
@@ -448,7 +487,7 @@ static int __init longhaul_get_ranges(void)
 static void __init longhaul_setup_voltagescaling(void)
 {
 	union msr_longhaul longhaul;
-	struct mV_pos minvid, maxvid;
+	struct mV_pos minvid, maxvid, vid;
 	unsigned int j, speed, pos, kHz_step, numvscales;
 	int min_vid_speed;
 
@@ -459,11 +498,11 @@ static void __init longhaul_setup_voltagescaling(void)
 	}
 
 	if (!longhaul.bits.VRMRev) {
-		printk (KERN_INFO PFX "VRM 8.5\n");
+		printk(KERN_INFO PFX "VRM 8.5\n");
 		vrm_mV_table = &vrm85_mV[0];
 		mV_vrm_table = &mV_vrm85[0];
 	} else {
-		printk (KERN_INFO PFX "Mobile VRM\n");
+		printk(KERN_INFO PFX "Mobile VRM\n");
 		if (cpu_model < CPU_NEHEMIAH)
 			return;
 		vrm_mV_table = &mobilevrm_mV[0];
@@ -523,7 +562,6 @@ static void __init longhaul_setup_voltagescaling(void)
 	/* Calculate kHz for one voltage step */
 	kHz_step = (highest_speed - min_vid_speed) / numvscales;
 
-
 	j = 0;
 	while (longhaul_table[j].frequency != CPUFREQ_TABLE_END) {
 		speed = longhaul_table[j].frequency;
@@ -531,15 +569,14 @@ static void __init longhaul_setup_voltagescaling(void)
 			pos = (speed - min_vid_speed) / kHz_step + minvid.pos;
 		else
 			pos = minvid.pos;
-		f_msr_table[longhaul_table[j].index].vrm = mV_vrm_table[pos];
-		f_msr_table[longhaul_table[j].index].pos = pos;
+		longhaul_table[j].index |= mV_vrm_table[pos] << 8;
+		vid = vrm_mV_table[mV_vrm_table[pos]];
+		printk(KERN_INFO PFX "f: %d kHz, index: %d, vid: %d mV\n", speed, j, vid.mV);
 		j++;
 	}
 
-	longhaul_pos = maxvid.pos;
 	can_scale_voltage = 1;
-	printk(KERN_INFO PFX "Voltage scaling enabled. "
-		"Use of \"conservative\" governor is highly recommended.\n");
+	printk(KERN_INFO PFX "Voltage scaling enabled.\n");
 }
 
 
@@ -553,15 +590,44 @@ static int longhaul_target(struct cpufreq_policy *policy,
 			    unsigned int target_freq, unsigned int relation)
 {
 	unsigned int table_index = 0;
-	unsigned int new_clock_ratio = 0;
+	unsigned int i;
+	unsigned int dir = 0;
+	u8 vid, current_vid;
 
 	if (cpufreq_frequency_table_target(policy, longhaul_table, target_freq, relation, &table_index))
 		return -EINVAL;
 
-	new_clock_ratio = longhaul_table[table_index].index & 0xFF;
-
-	longhaul_setstate(new_clock_ratio);
+	/* Don't set same frequency again */
+	if (longhaul_index == table_index)
+		return 0;
 
+	if (!can_scale_voltage)
+		longhaul_setstate(table_index);
+	else {
+		/* On test system voltage transitions exceeding single
+		 * step up or down were turning motherboard off. Both
+		 * "ondemand" and "userspace" are unsafe. C7 is doing
+		 * this in hardware, C3 is old and we need to do this
+		 * in software. */
+		i = longhaul_index;
+		current_vid = (longhaul_table[longhaul_index].index >> 8) & 0x1f;
+		if (table_index > longhaul_index)
+			dir = 1;
+		while (i != table_index) {
+			vid = (longhaul_table[i].index >> 8) & 0x1f;
+			if (vid != current_vid) {
+				longhaul_setstate(i);
+				current_vid = vid;
+				msleep(200);
+			}
+			if (dir)
+				i++;
+			else
+				i--;
+		}
+		longhaul_setstate(table_index);
+	}
+	longhaul_index = table_index;
 	return 0;
 }
 
@@ -590,11 +656,10 @@ static acpi_status longhaul_walk_callback(acpi_handle obj_handle,
 static int enable_arbiter_disable(void)
 {
 	struct pci_dev *dev;
-	int status;
+	int status = 1;
 	int reg;
 	u8 pci_cmd;
 
-	status = 1;
 	/* Find PLE133 host bridge */
 	reg = 0x78;
 	dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8601_0,
@@ -627,13 +692,17 @@ static int enable_arbiter_disable(void)
 	return 0;
 }
 
-static int longhaul_setup_vt8235(void)
+static int longhaul_setup_southbridge(void)
 {
 	struct pci_dev *dev;
 	u8 pci_cmd;
 
 	/* Find VT8235 southbridge */
 	dev = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235, NULL);
+	if (dev == NULL)
+		/* Find VT8237 southbridge */
+		dev = pci_get_device(PCI_VENDOR_ID_VIA,
+				     PCI_DEVICE_ID_VIA_8237, NULL);
 	if (dev != NULL) {
 		/* Set transition time to max */
 		pci_read_config_byte(dev, 0xec, &pci_cmd);
@@ -645,6 +714,14 @@ static int longhaul_setup_vt8235(void)
 		pci_read_config_byte(dev, 0xe5, &pci_cmd);
 		pci_cmd |= 1 << 7;
 		pci_write_config_byte(dev, 0xe5, pci_cmd);
+		/* Get address of ACPI registers block*/
+		pci_read_config_byte(dev, 0x81, &pci_cmd);
+		if (pci_cmd & 1 << 7) {
+			pci_read_config_dword(dev, 0x88, &acpi_regs_addr);
+			acpi_regs_addr &= 0xff00;
+			printk(KERN_INFO PFX "ACPI I/O at 0x%x\n", acpi_regs_addr);
+		}
+
 		pci_dev_put(dev);
 		return 1;
 	}
@@ -657,7 +734,6 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 	char *cpuname=NULL;
 	int ret;
 	u32 lo, hi;
-	int vt8235_present;
 
 	/* Check what we have on this motherboard */
 	switch (c->x86_model) {
@@ -755,7 +831,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 	};
 
 	/* Doesn't hurt */
-	vt8235_present = longhaul_setup_vt8235();
+	longhaul_setup_southbridge();
 
 	/* Find ACPI data for processor */
 	acpi_walk_namespace(ACPI_TYPE_PROCESSOR, ACPI_ROOT_OBJECT,
@@ -765,35 +841,26 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
 	/* Check ACPI support for C3 state */
 	if (pr != NULL && longhaul_version == TYPE_POWERSAVER) {
 		cx = &pr->power.states[ACPI_STATE_C3];
-		if (cx->address > 0 && cx->latency <= 1000) {
+		if (cx->address > 0 && cx->latency <= 1000)
 			longhaul_flags |= USE_ACPI_C3;
-			goto print_support_type;
-		}
 	}
 	/* Check if northbridge is friendly */
-	if (enable_arbiter_disable()) {
+	if (enable_arbiter_disable())
 		longhaul_flags |= USE_NORTHBRIDGE;
-		goto print_support_type;
-	}
-	/* Use VT8235 southbridge if present */
-	if (longhaul_version == TYPE_POWERSAVER && vt8235_present) {
-		longhaul_flags |= USE_VT8235;
-		goto print_support_type;
-	}
+
 	/* Check ACPI support for bus master arbiter disable */
-	if ((pr == NULL) || !(pr->flags.bm_control)) {
+	if (!(longhaul_flags & USE_ACPI_C3
+	     || longhaul_flags & USE_NORTHBRIDGE)
+	     && ((pr == NULL) || !(pr->flags.bm_control))) {
 		printk(KERN_ERR PFX
 		       "No ACPI support. Unsupported northbridge.\n");
 		return -ENODEV;
 	}
 
-print_support_type:
 	if (longhaul_flags & USE_NORTHBRIDGE)
-		printk (KERN_INFO PFX "Using northbridge support.\n");
-	else if (longhaul_flags & USE_VT8235)
-		printk (KERN_INFO PFX "Using VT8235 support.\n");
-	else
-		printk (KERN_INFO PFX "Using ACPI support.\n");
+		printk(KERN_INFO PFX "Using northbridge support.\n");
+	if (longhaul_flags & USE_ACPI_C3)
+		printk(KERN_INFO PFX "Using ACPI support.\n");
 
 	ret = longhaul_get_ranges();
 	if (ret != 0)
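[Editor's note: the longhaul_target() hunk above walks from the current table entry toward the target one entry at a time and performs a transition at every VID change, because larger voltage jumps were powering the test board off. Below is a user-space model of that stepping logic, not the driver itself; the frequency/VID table is invented for illustration, and the real code packs the VID into bits 8..12 of longhaul_table[].index and sleeps between steps.]

#include <stdio.h>

struct entry { unsigned int freq_khz; unsigned int vid; };

static const struct entry table[] = {
	{ 400000, 4 }, { 533000, 4 }, { 667000, 6 }, { 800000, 8 },
};

static void set_state(unsigned int i)
{
	printf("transition to %u kHz (vid %u)\n", table[i].freq_khz, table[i].vid);
}

/* Move from entry 'cur' to entry 'target' without ever skipping a VID step. */
static void walk(unsigned int cur, unsigned int target)
{
	unsigned int current_vid = table[cur].vid;
	int dir = (target > cur) ? 1 : -1;
	unsigned int i = cur;

	while (i != target) {
		if (table[i].vid != current_vid) {
			set_state(i);			/* intermediate voltage step */
			current_vid = table[i].vid;
		}
		i += dir;
	}
	set_state(target);				/* final frequency */
}

int main(void)
{
	walk(0, 3);	/* steps through vid 4 -> 6 -> 8 rather than jumping 4 -> 8 */
	return 0;
}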
diff --git a/arch/i386/kernel/cpu/cpufreq/longhaul.h b/arch/i386/kernel/cpu/cpufreq/longhaul.h
index 102548f12842..4fcc320997df 100644
--- a/arch/i386/kernel/cpu/cpufreq/longhaul.h
+++ b/arch/i386/kernel/cpu/cpufreq/longhaul.h
@@ -180,7 +180,7 @@ static const int __initdata ezrat_clock_ratio[32] = {
 
 	-1,  /* 0000 -> RESERVED (10.0x) */
 	110, /* 0001 -> 11.0x */
-	120, /* 0010 -> 12.0x */
+	-1,  /* 0010 -> 12.0x */
 	-1,  /* 0011 -> RESERVED (9.0x)*/
 	105, /* 0100 -> 10.5x */
 	115, /* 0101 -> 11.5x */
@@ -237,7 +237,7 @@ static const int __initdata ezrat_eblcr[32] = {
 
 static const int __initdata nehemiah_clock_ratio[32] = {
 	100, /* 0000 -> 10.0x */
-	160, /* 0001 -> 16.0x */
+	-1,  /* 0001 -> 16.0x */
 	40,  /* 0010 -> 4.0x */
 	90,  /* 0011 -> 9.0x */
 	95,  /* 0100 -> 9.5x */
@@ -252,10 +252,10 @@ static const int __initdata nehemiah_clock_ratio[32] = {
 	75,  /* 1101 -> 7.5x */
 	85,  /* 1110 -> 8.5x */
 	120, /* 1111 -> 12.0x */
-	100, /* 0000 -> 10.0x */
+	-1,  /* 0000 -> 10.0x */
 	110, /* 0001 -> 11.0x */
-	120, /* 0010 -> 12.0x */
-	90,  /* 0011 -> 9.0x */
+	-1,  /* 0010 -> 12.0x */
+	-1,  /* 0011 -> 9.0x */
 	105, /* 0100 -> 10.5x */
 	115, /* 0101 -> 11.5x */
 	125, /* 0110 -> 12.5x */
@@ -267,7 +267,7 @@ static const int __initdata nehemiah_clock_ratio[32] = {
 	145, /* 1100 -> 14.5x */
 	155, /* 1101 -> 15.5x */
 	-1,  /* 1110 -> RESERVED (13.0x) */
-	120, /* 1111 -> 12.0x */
+	-1,  /* 1111 -> 12.0x */
 };
 
 static const int __initdata nehemiah_eblcr[32] = {
diff --git a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
index 4ade55c5f333..977336834127 100644
--- a/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/i386/kernel/cpu/cpufreq/powernow-k8.c
@@ -1330,8 +1330,9 @@ static int __cpuinit powernowk8_init(void)
 
 	if (supported_cpus == num_online_cpus()) {
 		printk(KERN_INFO PFX "Found %d %s "
-		       "processors (" VERSION ")\n", supported_cpus,
-		       boot_cpu_data.x86_model_id);
+		       "processors (%d cpu cores) (" VERSION ")\n",
+		       supported_cpus/cpu_data[0].booted_cores,
+		       boot_cpu_data.x86_model_id, supported_cpus);
 		return cpufreq_register_driver(&cpufreq_amd64_driver);
 	}
 
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index eb37fba9b7ef..0db9e1bda322 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -826,13 +826,21 @@ static int cpufreq_add_dev (struct sys_device * sys_dev)
 	/* set up files for this cpu device */
 	drv_attr = cpufreq_driver->attr;
 	while ((drv_attr) && (*drv_attr)) {
-		sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
+		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
+		if (ret)
+			goto err_out_driver_exit;
 		drv_attr++;
 	}
-	if (cpufreq_driver->get)
-		sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
-	if (cpufreq_driver->target)
-		sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+	if (cpufreq_driver->get){
+		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
+		if (ret)
+			goto err_out_driver_exit;
+	}
+	if (cpufreq_driver->target){
+		ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
+		if (ret)
+			goto err_out_driver_exit;
+	}
 
 	spin_lock_irqsave(&cpufreq_driver_lock, flags);
 	for_each_cpu_mask(j, policy->cpus) {
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 8532bb79e5fc..e794527e4925 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -96,15 +96,25 @@ static struct dbs_tuners {
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
 {
-	cputime64_t retval;
+	cputime64_t idle_time;
+	cputime64_t cur_jiffies;
+	cputime64_t busy_time;
 
-	retval = cputime64_add(kstat_cpu(cpu).cpustat.idle,
-			kstat_cpu(cpu).cpustat.iowait);
+	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
+	busy_time = cputime64_add(kstat_cpu(cpu).cpustat.user,
+			kstat_cpu(cpu).cpustat.system);
 
-	if (dbs_tuners_ins.ignore_nice)
-		retval = cputime64_add(retval, kstat_cpu(cpu).cpustat.nice);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.irq);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.softirq);
+	busy_time = cputime64_add(busy_time, kstat_cpu(cpu).cpustat.steal);
 
-	return retval;
+	if (!dbs_tuners_ins.ignore_nice) {
+		busy_time = cputime64_add(busy_time,
+				kstat_cpu(cpu).cpustat.nice);
+	}
+
+	idle_time = cputime64_sub(cur_jiffies, busy_time);
+	return idle_time;
 }
 
 /*
@@ -325,7 +335,7 @@ static struct attribute_group dbs_attr_group = {
 static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 {
 	unsigned int idle_ticks, total_ticks;
-	unsigned int load;
+	unsigned int load = 0;
 	cputime64_t cur_jiffies;
 
 	struct cpufreq_policy *policy;
@@ -339,7 +349,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 	cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
 	total_ticks = (unsigned int) cputime64_sub(cur_jiffies,
 			this_dbs_info->prev_cpu_wall);
-	this_dbs_info->prev_cpu_wall = cur_jiffies;
+	this_dbs_info->prev_cpu_wall = get_jiffies_64();
+
 	if (!total_ticks)
 		return;
 	/*
@@ -370,7 +381,8 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 		if (tmp_idle_ticks < idle_ticks)
 			idle_ticks = tmp_idle_ticks;
 	}
-	load = (100 * (total_ticks - idle_ticks)) / total_ticks;
+	if (likely(total_ticks > idle_ticks))
+		load = (100 * (total_ticks - idle_ticks)) / total_ticks;
 
 	/* Check for frequency increase */
 	if (load > dbs_tuners_ins.up_threshold) {
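[Editor's note: the ondemand hunks above stop trusting the idle/iowait counters, which are not advanced while a tickless CPU sleeps, and instead derive idle time as wall time minus the sum of the busy counters. A minimal user-space sketch of that accounting, with made-up numbers:]

#include <stdio.h>

struct cpustat { unsigned long long user, nice, system, irq, softirq, steal; };

static unsigned long long derived_idle(unsigned long long wall,
				       const struct cpustat *s,
				       int ignore_nice)
{
	unsigned long long busy = s->user + s->system + s->irq +
				  s->softirq + s->steal;

	if (!ignore_nice)
		busy += s->nice;	/* nice time counts as busy */

	return wall - busy;		/* idle = wall time - busy time */
}

int main(void)
{
	struct cpustat s = { 120, 5, 60, 3, 7, 0 };	/* jiffies per category */

	printf("idle = %llu jiffies\n", derived_idle(1000, &s, 0));	/* 805 */
	return 0;
}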
diff --git a/drivers/cpufreq/cpufreq_userspace.c b/drivers/cpufreq/cpufreq_userspace.c
index a648970338b0..51bedab6c808 100644
--- a/drivers/cpufreq/cpufreq_userspace.c
+++ b/drivers/cpufreq/cpufreq_userspace.c
@@ -37,6 +37,7 @@ static unsigned int cpu_set_freq[NR_CPUS]; /* CPU freq desired by userspace */
 static unsigned int cpu_is_managed[NR_CPUS];
 
 static DEFINE_MUTEX (userspace_mutex);
+static int cpus_using_userspace_governor;
 
 #define dprintk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_GOVERNOR, "userspace", msg)
 
@@ -47,7 +48,11 @@ userspace_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
 {
 	struct cpufreq_freqs *freq = data;
 
-	dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n", freq->cpu, freq->new);
+	if (!cpu_is_managed[freq->cpu])
+		return 0;
+
+	dprintk("saving cpu_cur_freq of cpu %u to be %u kHz\n",
+			freq->cpu, freq->new);
 	cpu_cur_freq[freq->cpu] = freq->new;
 
 	return 0;
@@ -142,6 +147,13 @@ static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
 		if (rc)
 			goto start_out;
 
+		if (cpus_using_userspace_governor == 0) {
+			cpufreq_register_notifier(
+					&userspace_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+		}
+		cpus_using_userspace_governor++;
+
 		cpu_is_managed[cpu] = 1;
 		cpu_min_freq[cpu] = policy->min;
 		cpu_max_freq[cpu] = policy->max;
@@ -153,6 +165,13 @@ start_out:
 		break;
 	case CPUFREQ_GOV_STOP:
 		mutex_lock(&userspace_mutex);
+		cpus_using_userspace_governor--;
+		if (cpus_using_userspace_governor == 0) {
+			cpufreq_unregister_notifier(
+					&userspace_cpufreq_notifier_block,
+					CPUFREQ_TRANSITION_NOTIFIER);
+		}
+
 		cpu_is_managed[cpu] = 0;
 		cpu_min_freq[cpu] = 0;
 		cpu_max_freq[cpu] = 0;
@@ -198,7 +217,6 @@ EXPORT_SYMBOL(cpufreq_gov_userspace);
 
 static int __init cpufreq_gov_userspace_init(void)
 {
-	cpufreq_register_notifier(&userspace_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
 	return cpufreq_register_governor(&cpufreq_gov_userspace);
 }
 
@@ -206,7 +224,6 @@ static int __init cpufreq_gov_userspace_init(void)
 static void __exit cpufreq_gov_userspace_exit(void)
 {
 	cpufreq_unregister_governor(&cpufreq_gov_userspace);
-	cpufreq_unregister_notifier(&userspace_cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
 }
 
 
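[Editor's note: the userspace-governor hunks above move notifier registration out of module init and refcount it per CPU, so the transition callback only runs while at least one CPU actually uses the governor. A user-space sketch of that refcounting pattern follows; register_cb()/unregister_cb() are stand-ins for the cpufreq notifier calls, and the locking done under userspace_mutex in the real code is omitted.]

#include <stdio.h>

static int users;

static void register_cb(void)   { printf("notifier registered\n"); }
static void unregister_cb(void) { printf("notifier unregistered\n"); }

static void governor_start(void)
{
	if (users++ == 0)
		register_cb();		/* first user: hook the notifier */
}

static void governor_stop(void)
{
	if (--users == 0)
		unregister_cb();	/* last user: unhook it */
}

int main(void)
{
	governor_start();	/* CPU0 -> registered   */
	governor_start();	/* CPU1 -> no-op        */
	governor_stop();	/* CPU1 -> no-op        */
	governor_stop();	/* CPU0 -> unregistered */
	return 0;
}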