diff options
author | Ingo Molnar <mingo@elte.hu> | 2008-08-15 10:16:15 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2008-08-15 10:16:15 -0400 |
commit | 1a10390708d675ebf1a2f5e169a5165626afbd88 (patch) | |
tree | d9ee7d10abd65e580fb74152a501089f51174225 /arch/x86/kernel/cpu | |
parent | 239bd83104ec6bcba90221d8b0973d2565142ef8 (diff) | |
parent | b635acec48bcaa9183fcbf4e3955616b0d4119b5 (diff) |
Merge branch 'linus' into x86/cpu
Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r-- | arch/x86/kernel/cpu/bugs.c | 6 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/Kconfig | 4 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 10 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/elanfreq.c | 2 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 125 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 3 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 12 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | 3 | ||||
-rw-r--r-- | arch/x86/kernel/cpu/intel_cacheinfo.c | 3 |
9 files changed, 96 insertions(+), 72 deletions(-)
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c9b58a806e8..c8e315f1aa8 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
@@ -50,6 +50,8 @@ static double __initdata y = 3145727.0; | |||
50 | */ | 50 | */ |
51 | static void __init check_fpu(void) | 51 | static void __init check_fpu(void) |
52 | { | 52 | { |
53 | s32 fdiv_bug; | ||
54 | |||
53 | if (!boot_cpu_data.hard_math) { | 55 | if (!boot_cpu_data.hard_math) { |
54 | #ifndef CONFIG_MATH_EMULATION | 56 | #ifndef CONFIG_MATH_EMULATION |
55 | printk(KERN_EMERG "No coprocessor found and no math emulation present.\n"); | 57 | printk(KERN_EMERG "No coprocessor found and no math emulation present.\n"); |
@@ -74,8 +76,10 @@ static void __init check_fpu(void) | |||
74 | "fistpl %0\n\t" | 76 | "fistpl %0\n\t" |
75 | "fwait\n\t" | 77 | "fwait\n\t" |
76 | "fninit" | 78 | "fninit" |
77 | : "=m" (*&boot_cpu_data.fdiv_bug) | 79 | : "=m" (*&fdiv_bug) |
78 | : "m" (*&x), "m" (*&y)); | 80 | : "m" (*&x), "m" (*&y)); |
81 | |||
82 | boot_cpu_data.fdiv_bug = fdiv_bug; | ||
79 | if (boot_cpu_data.fdiv_bug) | 83 | if (boot_cpu_data.fdiv_bug) |
80 | printk("Hmm, FPU with FDIV bug.\n"); | 84 | printk("Hmm, FPU with FDIV bug.\n"); |
81 | } | 85 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig index cb7a5715596..efae3b22a0f 100644 --- a/arch/x86/kernel/cpu/cpufreq/Kconfig +++ b/arch/x86/kernel/cpu/cpufreq/Kconfig | |||
@@ -235,9 +235,9 @@ config X86_LONGHAUL | |||
235 | If in doubt, say N. | 235 | If in doubt, say N. |
236 | 236 | ||
237 | config X86_E_POWERSAVER | 237 | config X86_E_POWERSAVER |
238 | tristate "VIA C7 Enhanced PowerSaver (EXPERIMENTAL)" | 238 | tristate "VIA C7 Enhanced PowerSaver" |
239 | select CPU_FREQ_TABLE | 239 | select CPU_FREQ_TABLE |
240 | depends on X86_32 && EXPERIMENTAL | 240 | depends on X86_32 |
241 | help | 241 | help |
242 | This adds the CPUFreq driver for VIA C7 processors. | 242 | This adds the CPUFreq driver for VIA C7 processors. |
243 | 243 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c index ff2fff56f0a..dd097b83583 100644 --- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | |||
@@ -200,12 +200,10 @@ static void drv_read(struct drv_cmd *cmd) | |||
200 | static void drv_write(struct drv_cmd *cmd) | 200 | static void drv_write(struct drv_cmd *cmd) |
201 | { | 201 | { |
202 | cpumask_t saved_mask = current->cpus_allowed; | 202 | cpumask_t saved_mask = current->cpus_allowed; |
203 | cpumask_of_cpu_ptr_declare(cpu_mask); | ||
204 | unsigned int i; | 203 | unsigned int i; |
205 | 204 | ||
206 | for_each_cpu_mask_nr(i, cmd->mask) { | 205 | for_each_cpu_mask_nr(i, cmd->mask) { |
207 | cpumask_of_cpu_ptr_next(cpu_mask, i); | 206 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(i)); |
208 | set_cpus_allowed_ptr(current, cpu_mask); | ||
209 | do_drv_write(cmd); | 207 | do_drv_write(cmd); |
210 | } | 208 | } |
211 | 209 | ||
@@ -269,12 +267,11 @@ static unsigned int get_measured_perf(unsigned int cpu) | |||
269 | } aperf_cur, mperf_cur; | 267 | } aperf_cur, mperf_cur; |
270 | 268 | ||
271 | cpumask_t saved_mask; | 269 | cpumask_t saved_mask; |
272 | cpumask_of_cpu_ptr(cpu_mask, cpu); | ||
273 | unsigned int perf_percent; | 270 | unsigned int perf_percent; |
274 | unsigned int retval; | 271 | unsigned int retval; |
275 | 272 | ||
276 | saved_mask = current->cpus_allowed; | 273 | saved_mask = current->cpus_allowed; |
277 | set_cpus_allowed_ptr(current, cpu_mask); | 274 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); |
278 | if (get_cpu() != cpu) { | 275 | if (get_cpu() != cpu) { |
279 | /* We were not able to run on requested processor */ | 276 | /* We were not able to run on requested processor */ |
280 | put_cpu(); | 277 | put_cpu(); |
@@ -340,7 +337,6 @@ static unsigned int get_measured_perf(unsigned int cpu) | |||
340 | 337 | ||
341 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | 338 | static unsigned int get_cur_freq_on_cpu(unsigned int cpu) |
342 | { | 339 | { |
343 | cpumask_of_cpu_ptr(cpu_mask, cpu); | ||
344 | struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); | 340 | struct acpi_cpufreq_data *data = per_cpu(drv_data, cpu); |
345 | unsigned int freq; | 341 | unsigned int freq; |
346 | unsigned int cached_freq; | 342 | unsigned int cached_freq; |
@@ -353,7 +349,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu) | |||
353 | } | 349 | } |
354 | 350 | ||
355 | cached_freq = data->freq_table[data->acpi_data->state].frequency; | 351 | cached_freq = data->freq_table[data->acpi_data->state].frequency; |
356 | freq = extract_freq(get_cur_val(cpu_mask), data); | 352 | freq = extract_freq(get_cur_val(&cpumask_of_cpu(cpu)), data); |
357 | if (freq != cached_freq) { | 353 | if (freq != cached_freq) { |
358 | /* | 354 | /* |
359 | * The dreaded BIOS frequency change behind our back. | 355 | * The dreaded BIOS frequency change behind our back. |
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c index 94619c22f56..e4a4bf870e9 100644 --- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c +++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c | |||
@@ -44,7 +44,7 @@ struct s_elan_multiplier { | |||
44 | * It is important that the frequencies | 44 | * It is important that the frequencies |
45 | * are listed in ascending order here! | 45 | * are listed in ascending order here! |
46 | */ | 46 | */ |
47 | struct s_elan_multiplier elan_multiplier[] = { | 47 | static struct s_elan_multiplier elan_multiplier[] = { |
48 | {1000, 0x02, 0x18}, | 48 | {1000, 0x02, 0x18}, |
49 | {2000, 0x02, 0x10}, | 49 | {2000, 0x02, 0x10}, |
50 | {4000, 0x02, 0x08}, | 50 | {4000, 0x02, 0x08}, |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 53c7b693697..4e7271999a7 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
@@ -66,7 +66,6 @@ static u32 find_freq_from_fid(u32 fid) | |||
66 | return 800 + (fid * 100); | 66 | return 800 + (fid * 100); |
67 | } | 67 | } |
68 | 68 | ||
69 | |||
70 | /* Return a frequency in KHz, given an input fid */ | 69 | /* Return a frequency in KHz, given an input fid */ |
71 | static u32 find_khz_freq_from_fid(u32 fid) | 70 | static u32 find_khz_freq_from_fid(u32 fid) |
72 | { | 71 | { |
@@ -78,7 +77,6 @@ static u32 find_khz_freq_from_pstate(struct cpufreq_frequency_table *data, u32 p | |||
78 | return data[pstate].frequency; | 77 | return data[pstate].frequency; |
79 | } | 78 | } |
80 | 79 | ||
81 | |||
82 | /* Return the vco fid for an input fid | 80 | /* Return the vco fid for an input fid |
83 | * | 81 | * |
84 | * Each "low" fid has corresponding "high" fid, and you can get to "low" fids | 82 | * Each "low" fid has corresponding "high" fid, and you can get to "low" fids |
@@ -166,7 +164,6 @@ static void fidvid_msr_init(void) | |||
166 | wrmsr(MSR_FIDVID_CTL, lo, hi); | 164 | wrmsr(MSR_FIDVID_CTL, lo, hi); |
167 | } | 165 | } |
168 | 166 | ||
169 | |||
170 | /* write the new fid value along with the other control fields to the msr */ | 167 | /* write the new fid value along with the other control fields to the msr */ |
171 | static int write_new_fid(struct powernow_k8_data *data, u32 fid) | 168 | static int write_new_fid(struct powernow_k8_data *data, u32 fid) |
172 | { | 169 | { |
@@ -479,12 +476,11 @@ static int core_voltage_post_transition(struct powernow_k8_data *data, u32 reqvi | |||
479 | static int check_supported_cpu(unsigned int cpu) | 476 | static int check_supported_cpu(unsigned int cpu) |
480 | { | 477 | { |
481 | cpumask_t oldmask; | 478 | cpumask_t oldmask; |
482 | cpumask_of_cpu_ptr(cpu_mask, cpu); | ||
483 | u32 eax, ebx, ecx, edx; | 479 | u32 eax, ebx, ecx, edx; |
484 | unsigned int rc = 0; | 480 | unsigned int rc = 0; |
485 | 481 | ||
486 | oldmask = current->cpus_allowed; | 482 | oldmask = current->cpus_allowed; |
487 | set_cpus_allowed_ptr(current, cpu_mask); | 483 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); |
488 | 484 | ||
489 | if (smp_processor_id() != cpu) { | 485 | if (smp_processor_id() != cpu) { |
490 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); | 486 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", cpu); |
@@ -741,44 +737,63 @@ static int find_psb_table(struct powernow_k8_data *data) | |||
741 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | 737 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI |
742 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) | 738 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) |
743 | { | 739 | { |
744 | if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) | 740 | if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE)) |
745 | return; | 741 | return; |
746 | 742 | ||
747 | data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK; | 743 | data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK; |
748 | data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK; | 744 | data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK; |
749 | data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; | 745 | data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; |
750 | data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; | 746 | data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; |
751 | data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK); | 747 | data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK); |
752 | data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK; | 748 | data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK; |
749 | } | ||
750 | |||
751 | |||
752 | static struct acpi_processor_performance *acpi_perf_data; | ||
753 | static int preregister_valid; | ||
754 | |||
755 | static int powernow_k8_cpu_preinit_acpi(void) | ||
756 | { | ||
757 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); | ||
758 | if (!acpi_perf_data) | ||
759 | return -ENODEV; | ||
760 | |||
761 | if (acpi_processor_preregister_performance(acpi_perf_data)) | ||
762 | return -ENODEV; | ||
763 | else | ||
764 | preregister_valid = 1; | ||
765 | return 0; | ||
753 | } | 766 | } |
754 | 767 | ||
755 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | 768 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) |
756 | { | 769 | { |
757 | struct cpufreq_frequency_table *powernow_table; | 770 | struct cpufreq_frequency_table *powernow_table; |
758 | int ret_val; | 771 | int ret_val; |
772 | int cpu = 0; | ||
759 | 773 | ||
760 | if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { | 774 | data->acpi_data = percpu_ptr(acpi_perf_data, cpu); |
775 | if (acpi_processor_register_performance(data->acpi_data, data->cpu)) { | ||
761 | dprintk("register performance failed: bad ACPI data\n"); | 776 | dprintk("register performance failed: bad ACPI data\n"); |
762 | return -EIO; | 777 | return -EIO; |
763 | } | 778 | } |
764 | 779 | ||
765 | /* verify the data contained in the ACPI structures */ | 780 | /* verify the data contained in the ACPI structures */ |
766 | if (data->acpi_data.state_count <= 1) { | 781 | if (data->acpi_data->state_count <= 1) { |
767 | dprintk("No ACPI P-States\n"); | 782 | dprintk("No ACPI P-States\n"); |
768 | goto err_out; | 783 | goto err_out; |
769 | } | 784 | } |
770 | 785 | ||
771 | if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || | 786 | if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || |
772 | (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { | 787 | (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { |
773 | dprintk("Invalid control/status registers (%x - %x)\n", | 788 | dprintk("Invalid control/status registers (%x - %x)\n", |
774 | data->acpi_data.control_register.space_id, | 789 | data->acpi_data->control_register.space_id, |
775 | data->acpi_data.status_register.space_id); | 790 | data->acpi_data->status_register.space_id); |
776 | goto err_out; | 791 | goto err_out; |
777 | } | 792 | } |
778 | 793 | ||
779 | /* fill in data->powernow_table */ | 794 | /* fill in data->powernow_table */ |
780 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) | 795 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) |
781 | * (data->acpi_data.state_count + 1)), GFP_KERNEL); | 796 | * (data->acpi_data->state_count + 1)), GFP_KERNEL); |
782 | if (!powernow_table) { | 797 | if (!powernow_table) { |
783 | dprintk("powernow_table memory alloc failure\n"); | 798 | dprintk("powernow_table memory alloc failure\n"); |
784 | goto err_out; | 799 | goto err_out; |
@@ -791,12 +806,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
791 | if (ret_val) | 806 | if (ret_val) |
792 | goto err_out_mem; | 807 | goto err_out_mem; |
793 | 808 | ||
794 | powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; | 809 | powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END; |
795 | powernow_table[data->acpi_data.state_count].index = 0; | 810 | powernow_table[data->acpi_data->state_count].index = 0; |
796 | data->powernow_table = powernow_table; | 811 | data->powernow_table = powernow_table; |
797 | 812 | ||
798 | /* fill in data */ | 813 | /* fill in data */ |
799 | data->numps = data->acpi_data.state_count; | 814 | data->numps = data->acpi_data->state_count; |
800 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) | 815 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) |
801 | print_basics(data); | 816 | print_basics(data); |
802 | powernow_k8_acpi_pst_values(data, 0); | 817 | powernow_k8_acpi_pst_values(data, 0); |
@@ -804,16 +819,31 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
804 | /* notify BIOS that we exist */ | 819 | /* notify BIOS that we exist */ |
805 | acpi_processor_notify_smm(THIS_MODULE); | 820 | acpi_processor_notify_smm(THIS_MODULE); |
806 | 821 | ||
822 | /* determine affinity, from ACPI if available */ | ||
823 | if (preregister_valid) { | ||
824 | if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) || | ||
825 | (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY)) | ||
826 | data->starting_core_affinity = data->acpi_data->shared_cpu_map; | ||
827 | else | ||
828 | data->starting_core_affinity = cpumask_of_cpu(data->cpu); | ||
829 | } else { | ||
830 | /* best guess from family if not */ | ||
831 | if (cpu_family == CPU_HW_PSTATE) | ||
832 | data->starting_core_affinity = cpumask_of_cpu(data->cpu); | ||
833 | else | ||
834 | data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu); | ||
835 | } | ||
836 | |||
807 | return 0; | 837 | return 0; |
808 | 838 | ||
809 | err_out_mem: | 839 | err_out_mem: |
810 | kfree(powernow_table); | 840 | kfree(powernow_table); |
811 | 841 | ||
812 | err_out: | 842 | err_out: |
813 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); | 843 | acpi_processor_unregister_performance(data->acpi_data, data->cpu); |
814 | 844 | ||
815 | /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ | 845 | /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ |
816 | data->acpi_data.state_count = 0; | 846 | data->acpi_data->state_count = 0; |
817 | 847 | ||
818 | return -ENODEV; | 848 | return -ENODEV; |
819 | } | 849 | } |
@@ -825,10 +855,10 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf | |||
825 | rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); | 855 | rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); |
826 | data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; | 856 | data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; |
827 | 857 | ||
828 | for (i = 0; i < data->acpi_data.state_count; i++) { | 858 | for (i = 0; i < data->acpi_data->state_count; i++) { |
829 | u32 index; | 859 | u32 index; |
830 | 860 | ||
831 | index = data->acpi_data.states[i].control & HW_PSTATE_MASK; | 861 | index = data->acpi_data->states[i].control & HW_PSTATE_MASK; |
832 | if (index > data->max_hw_pstate) { | 862 | if (index > data->max_hw_pstate) { |
833 | printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); | 863 | printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); |
834 | printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); | 864 | printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); |
@@ -844,7 +874,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf | |||
844 | 874 | ||
845 | powernow_table[i].index = index; | 875 | powernow_table[i].index = index; |
846 | 876 | ||
847 | powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; | 877 | powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000; |
848 | } | 878 | } |
849 | return 0; | 879 | return 0; |
850 | } | 880 | } |
@@ -853,16 +883,16 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf | |||
853 | { | 883 | { |
854 | int i; | 884 | int i; |
855 | int cntlofreq = 0; | 885 | int cntlofreq = 0; |
856 | for (i = 0; i < data->acpi_data.state_count; i++) { | 886 | for (i = 0; i < data->acpi_data->state_count; i++) { |
857 | u32 fid; | 887 | u32 fid; |
858 | u32 vid; | 888 | u32 vid; |
859 | 889 | ||
860 | if (data->exttype) { | 890 | if (data->exttype) { |
861 | fid = data->acpi_data.states[i].status & EXT_FID_MASK; | 891 | fid = data->acpi_data->states[i].status & EXT_FID_MASK; |
862 | vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK; | 892 | vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK; |
863 | } else { | 893 | } else { |
864 | fid = data->acpi_data.states[i].control & FID_MASK; | 894 | fid = data->acpi_data->states[i].control & FID_MASK; |
865 | vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK; | 895 | vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK; |
866 | } | 896 | } |
867 | 897 | ||
868 | dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); | 898 | dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); |
@@ -903,10 +933,10 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf | |||
903 | cntlofreq = i; | 933 | cntlofreq = i; |
904 | } | 934 | } |
905 | 935 | ||
906 | if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { | 936 | if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) { |
907 | printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", | 937 | printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", |
908 | powernow_table[i].frequency, | 938 | powernow_table[i].frequency, |
909 | (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); | 939 | (unsigned int) (data->acpi_data->states[i].core_frequency * 1000)); |
910 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | 940 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; |
911 | continue; | 941 | continue; |
912 | } | 942 | } |
@@ -916,11 +946,12 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf | |||
916 | 946 | ||
917 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) | 947 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) |
918 | { | 948 | { |
919 | if (data->acpi_data.state_count) | 949 | if (data->acpi_data->state_count) |
920 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); | 950 | acpi_processor_unregister_performance(data->acpi_data, data->cpu); |
921 | } | 951 | } |
922 | 952 | ||
923 | #else | 953 | #else |
954 | static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; } | ||
924 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } | 955 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } |
925 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } | 956 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } |
926 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } | 957 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } |
@@ -1017,7 +1048,6 @@ static int transition_frequency_pstate(struct powernow_k8_data *data, unsigned i | |||
1017 | static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) | 1048 | static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsigned relation) |
1018 | { | 1049 | { |
1019 | cpumask_t oldmask; | 1050 | cpumask_t oldmask; |
1020 | cpumask_of_cpu_ptr(cpu_mask, pol->cpu); | ||
1021 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); | 1051 | struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu); |
1022 | u32 checkfid; | 1052 | u32 checkfid; |
1023 | u32 checkvid; | 1053 | u32 checkvid; |
@@ -1032,7 +1062,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, unsigned targfreq, unsi | |||
1032 | 1062 | ||
1033 | /* only run on specific CPU from here on */ | 1063 | /* only run on specific CPU from here on */ |
1034 | oldmask = current->cpus_allowed; | 1064 | oldmask = current->cpus_allowed; |
1035 | set_cpus_allowed_ptr(current, cpu_mask); | 1065 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); |
1036 | 1066 | ||
1037 | if (smp_processor_id() != pol->cpu) { | 1067 | if (smp_processor_id() != pol->cpu) { |
1038 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); | 1068 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); |
@@ -1106,8 +1136,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol) | |||
1106 | static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | 1136 | static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) |
1107 | { | 1137 | { |
1108 | struct powernow_k8_data *data; | 1138 | struct powernow_k8_data *data; |
1109 | cpumask_t oldmask; | 1139 | cpumask_t oldmask = CPU_MASK_ALL; |
1110 | cpumask_of_cpu_ptr_declare(newmask); | ||
1111 | int rc; | 1140 | int rc; |
1112 | 1141 | ||
1113 | if (!cpu_online(pol->cpu)) | 1142 | if (!cpu_online(pol->cpu)) |
@@ -1159,8 +1188,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1159 | 1188 | ||
1160 | /* only run on specific CPU from here on */ | 1189 | /* only run on specific CPU from here on */ |
1161 | oldmask = current->cpus_allowed; | 1190 | oldmask = current->cpus_allowed; |
1162 | cpumask_of_cpu_ptr_next(newmask, pol->cpu); | 1191 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(pol->cpu)); |
1163 | set_cpus_allowed_ptr(current, newmask); | ||
1164 | 1192 | ||
1165 | if (smp_processor_id() != pol->cpu) { | 1193 | if (smp_processor_id() != pol->cpu) { |
1166 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); | 1194 | printk(KERN_ERR PFX "limiting to cpu %u failed\n", pol->cpu); |
@@ -1181,10 +1209,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
1181 | /* run on any CPU again */ | 1209 | /* run on any CPU again */ |
1182 | set_cpus_allowed_ptr(current, &oldmask); | 1210 | set_cpus_allowed_ptr(current, &oldmask); |
1183 | 1211 | ||
1184 | if (cpu_family == CPU_HW_PSTATE) | 1212 | pol->cpus = data->starting_core_affinity; |
1185 | pol->cpus = *newmask; | ||
1186 | else | ||
1187 | pol->cpus = per_cpu(cpu_core_map, pol->cpu); | ||
1188 | data->available_cores = &(pol->cpus); | 1213 | data->available_cores = &(pol->cpus); |
1189 | 1214 | ||
1190 | /* Take a crude guess here. | 1215 | /* Take a crude guess here. |
@@ -1248,7 +1273,6 @@ static unsigned int powernowk8_get (unsigned int cpu) | |||
1248 | { | 1273 | { |
1249 | struct powernow_k8_data *data; | 1274 | struct powernow_k8_data *data; |
1250 | cpumask_t oldmask = current->cpus_allowed; | 1275 | cpumask_t oldmask = current->cpus_allowed; |
1251 | cpumask_of_cpu_ptr(newmask, cpu); | ||
1252 | unsigned int khz = 0; | 1276 | unsigned int khz = 0; |
1253 | unsigned int first; | 1277 | unsigned int first; |
1254 | 1278 | ||
@@ -1258,7 +1282,7 @@ static unsigned int powernowk8_get (unsigned int cpu) | |||
1258 | if (!data) | 1282 | if (!data) |
1259 | return -EINVAL; | 1283 | return -EINVAL; |
1260 | 1284 | ||
1261 | set_cpus_allowed_ptr(current, newmask); | 1285 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); |
1262 | if (smp_processor_id() != cpu) { | 1286 | if (smp_processor_id() != cpu) { |
1263 | printk(KERN_ERR PFX | 1287 | printk(KERN_ERR PFX |
1264 | "limiting to CPU %d failed in powernowk8_get\n", cpu); | 1288 | "limiting to CPU %d failed in powernowk8_get\n", cpu); |
@@ -1308,6 +1332,7 @@ static int __cpuinit powernowk8_init(void) | |||
1308 | } | 1332 | } |
1309 | 1333 | ||
1310 | if (supported_cpus == num_online_cpus()) { | 1334 | if (supported_cpus == num_online_cpus()) { |
1335 | powernow_k8_cpu_preinit_acpi(); | ||
1311 | printk(KERN_INFO PFX "Found %d %s " | 1336 | printk(KERN_INFO PFX "Found %d %s " |
1312 | "processors (%d cpu cores) (" VERSION ")\n", | 1337 | "processors (%d cpu cores) (" VERSION ")\n", |
1313 | num_online_nodes(), | 1338 | num_online_nodes(), |
@@ -1324,6 +1349,10 @@ static void __exit powernowk8_exit(void) | |||
1324 | dprintk("exit\n"); | 1349 | dprintk("exit\n"); |
1325 | 1350 | ||
1326 | cpufreq_unregister_driver(&cpufreq_amd64_driver); | 1351 | cpufreq_unregister_driver(&cpufreq_amd64_driver); |
1352 | |||
1353 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | ||
1354 | free_percpu(acpi_perf_data); | ||
1355 | #endif | ||
1327 | } | 1356 | } |
1328 | 1357 | ||
1329 | MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>"); | 1358 | MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>"); |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index ab48cfed4d9..a62612cd4be 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h | |||
@@ -33,12 +33,13 @@ struct powernow_k8_data { | |||
33 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | 33 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI |
34 | /* the acpi table needs to be kept. it's only available if ACPI was | 34 | /* the acpi table needs to be kept. it's only available if ACPI was |
35 | * used to determine valid frequency/vid/fid states */ | 35 | * used to determine valid frequency/vid/fid states */ |
36 | struct acpi_processor_performance acpi_data; | 36 | struct acpi_processor_performance *acpi_data; |
37 | #endif | 37 | #endif |
38 | /* we need to keep track of associated cores, but let cpufreq | 38 | /* we need to keep track of associated cores, but let cpufreq |
39 | * handle hotplug events - so just point at cpufreq pol->cpus | 39 | * handle hotplug events - so just point at cpufreq pol->cpus |
40 | * structure */ | 40 | * structure */ |
41 | cpumask_t *available_cores; | 41 | cpumask_t *available_cores; |
42 | cpumask_t starting_core_affinity; | ||
42 | }; | 43 | }; |
43 | 44 | ||
44 | 45 | ||
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c index ca2ac13b7af..15e13c01cc3 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | |||
@@ -324,10 +324,9 @@ static unsigned int get_cur_freq(unsigned int cpu) | |||
324 | unsigned l, h; | 324 | unsigned l, h; |
325 | unsigned clock_freq; | 325 | unsigned clock_freq; |
326 | cpumask_t saved_mask; | 326 | cpumask_t saved_mask; |
327 | cpumask_of_cpu_ptr(new_mask, cpu); | ||
328 | 327 | ||
329 | saved_mask = current->cpus_allowed; | 328 | saved_mask = current->cpus_allowed; |
330 | set_cpus_allowed_ptr(current, new_mask); | 329 | set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); |
331 | if (smp_processor_id() != cpu) | 330 | if (smp_processor_id() != cpu) |
332 | return 0; | 331 | return 0; |
333 | 332 | ||
@@ -585,15 +584,12 @@ static int centrino_target (struct cpufreq_policy *policy, | |||
585 | * Best effort undo.. | 584 | * Best effort undo.. |
586 | */ | 585 | */ |
587 | 586 | ||
588 | if (!cpus_empty(*covered_cpus)) { | 587 | if (!cpus_empty(*covered_cpus)) |
589 | cpumask_of_cpu_ptr_declare(new_mask); | ||
590 | |||
591 | for_each_cpu_mask_nr(j, *covered_cpus) { | 588 | for_each_cpu_mask_nr(j, *covered_cpus) { |
592 | cpumask_of_cpu_ptr_next(new_mask, j); | 589 | set_cpus_allowed_ptr(current, |
593 | set_cpus_allowed_ptr(current, new_mask); | 590 | &cpumask_of_cpu(j)); |
594 | wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); | 591 | wrmsr(MSR_IA32_PERF_CTL, oldmsr, h); |
595 | } | 592 | } |
596 | } | ||
597 | 593 | ||
598 | tmp = freqs.new; | 594 | tmp = freqs.new; |
599 | freqs.new = freqs.old; | 595 | freqs.new = freqs.old; |
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c index 2f3728dc24f..191f7263c61 100644 --- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c +++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c | |||
@@ -244,8 +244,7 @@ static unsigned int _speedstep_get(const cpumask_t *cpus) | |||
244 | 244 | ||
245 | static unsigned int speedstep_get(unsigned int cpu) | 245 | static unsigned int speedstep_get(unsigned int cpu) |
246 | { | 246 | { |
247 | cpumask_of_cpu_ptr(newmask, cpu); | 247 | return _speedstep_get(&cpumask_of_cpu(cpu)); |
248 | return _speedstep_get(newmask); | ||
249 | } | 248 | } |
250 | 249 | ||
251 | /** | 250 | /** |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 1677b55371a..3f46afbb1cf 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -540,7 +540,6 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
540 | unsigned long j; | 540 | unsigned long j; |
541 | int retval; | 541 | int retval; |
542 | cpumask_t oldmask; | 542 | cpumask_t oldmask; |
543 | cpumask_of_cpu_ptr(newmask, cpu); | ||
544 | 543 | ||
545 | if (num_cache_leaves == 0) | 544 | if (num_cache_leaves == 0) |
546 | return -ENOENT; | 545 | return -ENOENT; |
@@ -551,7 +550,7 @@ static int __cpuinit detect_cache_attributes(unsigned int cpu) | |||
551 | return -ENOMEM; | 550 | return -ENOMEM; |
552 | 551 | ||
553 | oldmask = current->cpus_allowed; | 552 | oldmask = current->cpus_allowed; |
554 | retval = set_cpus_allowed_ptr(current, newmask); | 553 | retval = set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)); |
555 | if (retval) | 554 | if (retval) |
556 | goto out; | 555 | goto out; |
557 | 556 | ||