diff options
| author | Ingo Molnar <mingo@elte.hu> | 2008-09-05 12:56:57 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-09-05 12:56:57 -0400 |
| commit | 616ad8c44281c0c6711a72b560e01ec335ff27e0 (patch) | |
| tree | 0a20453ffedb09db6fb41a0c2208ccc2c7751d3a /arch/x86/kernel/cpu | |
| parent | 99809963c99e1ed868d9ebeb4a5e7ee1cbe0309f (diff) | |
| parent | b380b0d4f7dffcc235c0facefa537d4655619101 (diff) | |
Merge branch 'linus' into x86/defconfig
Diffstat (limited to 'arch/x86/kernel/cpu')
| -rw-r--r-- | arch/x86/kernel/cpu/addon_cpuid_features.c | 17 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/bugs.c | 6 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/cpufreq/powernow-k8.c | 109 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/cpufreq/powernow-k8.h | 3 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/cyrix.c | 18 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_64.c | 5 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_amd_64.c | 18 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/mtrr/generic.c | 20 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/mtrr/main.c | 5 | ||||
| -rw-r--r-- | arch/x86/kernel/cpu/perfctr-watchdog.c | 8 |
10 files changed, 93 insertions(+), 116 deletions(-)
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c index 84a8220a6072..a6ef672adbba 100644 --- a/arch/x86/kernel/cpu/addon_cpuid_features.c +++ b/arch/x86/kernel/cpu/addon_cpuid_features.c | |||
| @@ -56,9 +56,22 @@ void __cpuinit validate_pat_support(struct cpuinfo_x86 *c) | |||
| 56 | 56 | ||
| 57 | switch (c->x86_vendor) { | 57 | switch (c->x86_vendor) { |
| 58 | case X86_VENDOR_INTEL: | 58 | case X86_VENDOR_INTEL: |
| 59 | if (c->x86 == 0xF || (c->x86 == 6 && c->x86_model >= 15)) | 59 | /* |
| 60 | * There is a known erratum on Pentium III and Core Solo | ||
| 61 | * and Core Duo CPUs. | ||
| 62 | * " Page with PAT set to WC while associated MTRR is UC | ||
| 63 | * may consolidate to UC " | ||
| 64 | * Because of this erratum, it is better to stick with | ||
| 65 | * setting WC in MTRR rather than using PAT on these CPUs. | ||
| 66 | * | ||
| 67 | * Enable PAT WC only on P4, Core 2 or later CPUs. | ||
| 68 | */ | ||
| 69 | if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15)) | ||
| 60 | return; | 70 | return; |
| 61 | break; | 71 | |
| 72 | pat_disable("PAT WC disabled due to known CPU erratum."); | ||
| 73 | return; | ||
| 74 | |||
| 62 | case X86_VENDOR_AMD: | 75 | case X86_VENDOR_AMD: |
| 63 | case X86_VENDOR_CENTAUR: | 76 | case X86_VENDOR_CENTAUR: |
| 64 | case X86_VENDOR_TRANSMETA: | 77 | case X86_VENDOR_TRANSMETA: |
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c9b58a806e85..c8e315f1aa83 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c | |||
| @@ -50,6 +50,8 @@ static double __initdata y = 3145727.0; | |||
| 50 | */ | 50 | */ |
| 51 | static void __init check_fpu(void) | 51 | static void __init check_fpu(void) |
| 52 | { | 52 | { |
| 53 | s32 fdiv_bug; | ||
| 54 | |||
| 53 | if (!boot_cpu_data.hard_math) { | 55 | if (!boot_cpu_data.hard_math) { |
| 54 | #ifndef CONFIG_MATH_EMULATION | 56 | #ifndef CONFIG_MATH_EMULATION |
| 55 | printk(KERN_EMERG "No coprocessor found and no math emulation present.\n"); | 57 | printk(KERN_EMERG "No coprocessor found and no math emulation present.\n"); |
| @@ -74,8 +76,10 @@ static void __init check_fpu(void) | |||
| 74 | "fistpl %0\n\t" | 76 | "fistpl %0\n\t" |
| 75 | "fwait\n\t" | 77 | "fwait\n\t" |
| 76 | "fninit" | 78 | "fninit" |
| 77 | : "=m" (*&boot_cpu_data.fdiv_bug) | 79 | : "=m" (*&fdiv_bug) |
| 78 | : "m" (*&x), "m" (*&y)); | 80 | : "m" (*&x), "m" (*&y)); |
| 81 | |||
| 82 | boot_cpu_data.fdiv_bug = fdiv_bug; | ||
| 79 | if (boot_cpu_data.fdiv_bug) | 83 | if (boot_cpu_data.fdiv_bug) |
| 80 | printk("Hmm, FPU with FDIV bug.\n"); | 84 | printk("Hmm, FPU with FDIV bug.\n"); |
| 81 | } | 85 | } |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c index 4e7271999a74..84bb395038d8 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c | |||
| @@ -737,63 +737,44 @@ static int find_psb_table(struct powernow_k8_data *data) | |||
| 737 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | 737 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI |
| 738 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) | 738 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) |
| 739 | { | 739 | { |
| 740 | if (!data->acpi_data->state_count || (cpu_family == CPU_HW_PSTATE)) | 740 | if (!data->acpi_data.state_count || (cpu_family == CPU_HW_PSTATE)) |
| 741 | return; | 741 | return; |
| 742 | 742 | ||
| 743 | data->irt = (data->acpi_data->states[index].control >> IRT_SHIFT) & IRT_MASK; | 743 | data->irt = (data->acpi_data.states[index].control >> IRT_SHIFT) & IRT_MASK; |
| 744 | data->rvo = (data->acpi_data->states[index].control >> RVO_SHIFT) & RVO_MASK; | 744 | data->rvo = (data->acpi_data.states[index].control >> RVO_SHIFT) & RVO_MASK; |
| 745 | data->exttype = (data->acpi_data->states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; | 745 | data->exttype = (data->acpi_data.states[index].control >> EXT_TYPE_SHIFT) & EXT_TYPE_MASK; |
| 746 | data->plllock = (data->acpi_data->states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; | 746 | data->plllock = (data->acpi_data.states[index].control >> PLL_L_SHIFT) & PLL_L_MASK; |
| 747 | data->vidmvs = 1 << ((data->acpi_data->states[index].control >> MVS_SHIFT) & MVS_MASK); | 747 | data->vidmvs = 1 << ((data->acpi_data.states[index].control >> MVS_SHIFT) & MVS_MASK); |
| 748 | data->vstable = (data->acpi_data->states[index].control >> VST_SHIFT) & VST_MASK; | 748 | data->vstable = (data->acpi_data.states[index].control >> VST_SHIFT) & VST_MASK; |
| 749 | } | ||
| 750 | |||
| 751 | |||
| 752 | static struct acpi_processor_performance *acpi_perf_data; | ||
| 753 | static int preregister_valid; | ||
| 754 | |||
| 755 | static int powernow_k8_cpu_preinit_acpi(void) | ||
| 756 | { | ||
| 757 | acpi_perf_data = alloc_percpu(struct acpi_processor_performance); | ||
| 758 | if (!acpi_perf_data) | ||
| 759 | return -ENODEV; | ||
| 760 | |||
| 761 | if (acpi_processor_preregister_performance(acpi_perf_data)) | ||
| 762 | return -ENODEV; | ||
| 763 | else | ||
| 764 | preregister_valid = 1; | ||
| 765 | return 0; | ||
| 766 | } | 749 | } |
| 767 | 750 | ||
| 768 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | 751 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) |
| 769 | { | 752 | { |
| 770 | struct cpufreq_frequency_table *powernow_table; | 753 | struct cpufreq_frequency_table *powernow_table; |
| 771 | int ret_val; | 754 | int ret_val; |
| 772 | int cpu = 0; | ||
| 773 | 755 | ||
| 774 | data->acpi_data = percpu_ptr(acpi_perf_data, cpu); | 756 | if (acpi_processor_register_performance(&data->acpi_data, data->cpu)) { |
| 775 | if (acpi_processor_register_performance(data->acpi_data, data->cpu)) { | ||
| 776 | dprintk("register performance failed: bad ACPI data\n"); | 757 | dprintk("register performance failed: bad ACPI data\n"); |
| 777 | return -EIO; | 758 | return -EIO; |
| 778 | } | 759 | } |
| 779 | 760 | ||
| 780 | /* verify the data contained in the ACPI structures */ | 761 | /* verify the data contained in the ACPI structures */ |
| 781 | if (data->acpi_data->state_count <= 1) { | 762 | if (data->acpi_data.state_count <= 1) { |
| 782 | dprintk("No ACPI P-States\n"); | 763 | dprintk("No ACPI P-States\n"); |
| 783 | goto err_out; | 764 | goto err_out; |
| 784 | } | 765 | } |
| 785 | 766 | ||
| 786 | if ((data->acpi_data->control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || | 767 | if ((data->acpi_data.control_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE) || |
| 787 | (data->acpi_data->status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { | 768 | (data->acpi_data.status_register.space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)) { |
| 788 | dprintk("Invalid control/status registers (%x - %x)\n", | 769 | dprintk("Invalid control/status registers (%x - %x)\n", |
| 789 | data->acpi_data->control_register.space_id, | 770 | data->acpi_data.control_register.space_id, |
| 790 | data->acpi_data->status_register.space_id); | 771 | data->acpi_data.status_register.space_id); |
| 791 | goto err_out; | 772 | goto err_out; |
| 792 | } | 773 | } |
| 793 | 774 | ||
| 794 | /* fill in data->powernow_table */ | 775 | /* fill in data->powernow_table */ |
| 795 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) | 776 | powernow_table = kmalloc((sizeof(struct cpufreq_frequency_table) |
| 796 | * (data->acpi_data->state_count + 1)), GFP_KERNEL); | 777 | * (data->acpi_data.state_count + 1)), GFP_KERNEL); |
| 797 | if (!powernow_table) { | 778 | if (!powernow_table) { |
| 798 | dprintk("powernow_table memory alloc failure\n"); | 779 | dprintk("powernow_table memory alloc failure\n"); |
| 799 | goto err_out; | 780 | goto err_out; |
| @@ -806,12 +787,12 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
| 806 | if (ret_val) | 787 | if (ret_val) |
| 807 | goto err_out_mem; | 788 | goto err_out_mem; |
| 808 | 789 | ||
| 809 | powernow_table[data->acpi_data->state_count].frequency = CPUFREQ_TABLE_END; | 790 | powernow_table[data->acpi_data.state_count].frequency = CPUFREQ_TABLE_END; |
| 810 | powernow_table[data->acpi_data->state_count].index = 0; | 791 | powernow_table[data->acpi_data.state_count].index = 0; |
| 811 | data->powernow_table = powernow_table; | 792 | data->powernow_table = powernow_table; |
| 812 | 793 | ||
| 813 | /* fill in data */ | 794 | /* fill in data */ |
| 814 | data->numps = data->acpi_data->state_count; | 795 | data->numps = data->acpi_data.state_count; |
| 815 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) | 796 | if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu) |
| 816 | print_basics(data); | 797 | print_basics(data); |
| 817 | powernow_k8_acpi_pst_values(data, 0); | 798 | powernow_k8_acpi_pst_values(data, 0); |
| @@ -819,31 +800,16 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) | |||
| 819 | /* notify BIOS that we exist */ | 800 | /* notify BIOS that we exist */ |
| 820 | acpi_processor_notify_smm(THIS_MODULE); | 801 | acpi_processor_notify_smm(THIS_MODULE); |
| 821 | 802 | ||
| 822 | /* determine affinity, from ACPI if available */ | ||
| 823 | if (preregister_valid) { | ||
| 824 | if ((data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ALL) || | ||
| 825 | (data->acpi_data->shared_type == CPUFREQ_SHARED_TYPE_ANY)) | ||
| 826 | data->starting_core_affinity = data->acpi_data->shared_cpu_map; | ||
| 827 | else | ||
| 828 | data->starting_core_affinity = cpumask_of_cpu(data->cpu); | ||
| 829 | } else { | ||
| 830 | /* best guess from family if not */ | ||
| 831 | if (cpu_family == CPU_HW_PSTATE) | ||
| 832 | data->starting_core_affinity = cpumask_of_cpu(data->cpu); | ||
| 833 | else | ||
| 834 | data->starting_core_affinity = per_cpu(cpu_core_map, data->cpu); | ||
| 835 | } | ||
| 836 | |||
| 837 | return 0; | 803 | return 0; |
| 838 | 804 | ||
| 839 | err_out_mem: | 805 | err_out_mem: |
| 840 | kfree(powernow_table); | 806 | kfree(powernow_table); |
| 841 | 807 | ||
| 842 | err_out: | 808 | err_out: |
| 843 | acpi_processor_unregister_performance(data->acpi_data, data->cpu); | 809 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); |
| 844 | 810 | ||
| 845 | /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ | 811 | /* data->acpi_data.state_count informs us at ->exit() whether ACPI was used */ |
| 846 | data->acpi_data->state_count = 0; | 812 | data->acpi_data.state_count = 0; |
| 847 | 813 | ||
| 848 | return -ENODEV; | 814 | return -ENODEV; |
| 849 | } | 815 | } |
| @@ -855,10 +821,10 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf | |||
| 855 | rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); | 821 | rdmsr(MSR_PSTATE_CUR_LIMIT, hi, lo); |
| 856 | data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; | 822 | data->max_hw_pstate = (hi & HW_PSTATE_MAX_MASK) >> HW_PSTATE_MAX_SHIFT; |
| 857 | 823 | ||
| 858 | for (i = 0; i < data->acpi_data->state_count; i++) { | 824 | for (i = 0; i < data->acpi_data.state_count; i++) { |
| 859 | u32 index; | 825 | u32 index; |
| 860 | 826 | ||
| 861 | index = data->acpi_data->states[i].control & HW_PSTATE_MASK; | 827 | index = data->acpi_data.states[i].control & HW_PSTATE_MASK; |
| 862 | if (index > data->max_hw_pstate) { | 828 | if (index > data->max_hw_pstate) { |
| 863 | printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); | 829 | printk(KERN_ERR PFX "invalid pstate %d - bad value %d.\n", i, index); |
| 864 | printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); | 830 | printk(KERN_ERR PFX "Please report to BIOS manufacturer\n"); |
| @@ -874,7 +840,7 @@ static int fill_powernow_table_pstate(struct powernow_k8_data *data, struct cpuf | |||
| 874 | 840 | ||
| 875 | powernow_table[i].index = index; | 841 | powernow_table[i].index = index; |
| 876 | 842 | ||
| 877 | powernow_table[i].frequency = data->acpi_data->states[i].core_frequency * 1000; | 843 | powernow_table[i].frequency = data->acpi_data.states[i].core_frequency * 1000; |
| 878 | } | 844 | } |
| 879 | return 0; | 845 | return 0; |
| 880 | } | 846 | } |
| @@ -883,16 +849,16 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf | |||
| 883 | { | 849 | { |
| 884 | int i; | 850 | int i; |
| 885 | int cntlofreq = 0; | 851 | int cntlofreq = 0; |
| 886 | for (i = 0; i < data->acpi_data->state_count; i++) { | 852 | for (i = 0; i < data->acpi_data.state_count; i++) { |
| 887 | u32 fid; | 853 | u32 fid; |
| 888 | u32 vid; | 854 | u32 vid; |
| 889 | 855 | ||
| 890 | if (data->exttype) { | 856 | if (data->exttype) { |
| 891 | fid = data->acpi_data->states[i].status & EXT_FID_MASK; | 857 | fid = data->acpi_data.states[i].status & EXT_FID_MASK; |
| 892 | vid = (data->acpi_data->states[i].status >> VID_SHIFT) & EXT_VID_MASK; | 858 | vid = (data->acpi_data.states[i].status >> VID_SHIFT) & EXT_VID_MASK; |
| 893 | } else { | 859 | } else { |
| 894 | fid = data->acpi_data->states[i].control & FID_MASK; | 860 | fid = data->acpi_data.states[i].control & FID_MASK; |
| 895 | vid = (data->acpi_data->states[i].control >> VID_SHIFT) & VID_MASK; | 861 | vid = (data->acpi_data.states[i].control >> VID_SHIFT) & VID_MASK; |
| 896 | } | 862 | } |
| 897 | 863 | ||
| 898 | dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); | 864 | dprintk(" %d : fid 0x%x, vid 0x%x\n", i, fid, vid); |
| @@ -933,10 +899,10 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf | |||
| 933 | cntlofreq = i; | 899 | cntlofreq = i; |
| 934 | } | 900 | } |
| 935 | 901 | ||
| 936 | if (powernow_table[i].frequency != (data->acpi_data->states[i].core_frequency * 1000)) { | 902 | if (powernow_table[i].frequency != (data->acpi_data.states[i].core_frequency * 1000)) { |
| 937 | printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", | 903 | printk(KERN_INFO PFX "invalid freq entries %u kHz vs. %u kHz\n", |
| 938 | powernow_table[i].frequency, | 904 | powernow_table[i].frequency, |
| 939 | (unsigned int) (data->acpi_data->states[i].core_frequency * 1000)); | 905 | (unsigned int) (data->acpi_data.states[i].core_frequency * 1000)); |
| 940 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; | 906 | powernow_table[i].frequency = CPUFREQ_ENTRY_INVALID; |
| 941 | continue; | 907 | continue; |
| 942 | } | 908 | } |
| @@ -946,12 +912,11 @@ static int fill_powernow_table_fidvid(struct powernow_k8_data *data, struct cpuf | |||
| 946 | 912 | ||
| 947 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) | 913 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) |
| 948 | { | 914 | { |
| 949 | if (data->acpi_data->state_count) | 915 | if (data->acpi_data.state_count) |
| 950 | acpi_processor_unregister_performance(data->acpi_data, data->cpu); | 916 | acpi_processor_unregister_performance(&data->acpi_data, data->cpu); |
| 951 | } | 917 | } |
| 952 | 918 | ||
| 953 | #else | 919 | #else |
| 954 | static int powernow_k8_cpu_preinit_acpi(void) { return -ENODEV; } | ||
| 955 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } | 920 | static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data) { return -ENODEV; } |
| 956 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } | 921 | static void powernow_k8_cpu_exit_acpi(struct powernow_k8_data *data) { return; } |
| 957 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } | 922 | static void powernow_k8_acpi_pst_values(struct powernow_k8_data *data, unsigned int index) { return; } |
| @@ -1136,7 +1101,7 @@ static int powernowk8_verify(struct cpufreq_policy *pol) | |||
| 1136 | static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | 1101 | static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) |
| 1137 | { | 1102 | { |
| 1138 | struct powernow_k8_data *data; | 1103 | struct powernow_k8_data *data; |
| 1139 | cpumask_t oldmask = CPU_MASK_ALL; | 1104 | cpumask_t oldmask; |
| 1140 | int rc; | 1105 | int rc; |
| 1141 | 1106 | ||
| 1142 | if (!cpu_online(pol->cpu)) | 1107 | if (!cpu_online(pol->cpu)) |
| @@ -1209,7 +1174,10 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol) | |||
| 1209 | /* run on any CPU again */ | 1174 | /* run on any CPU again */ |
| 1210 | set_cpus_allowed_ptr(current, &oldmask); | 1175 | set_cpus_allowed_ptr(current, &oldmask); |
| 1211 | 1176 | ||
| 1212 | pol->cpus = data->starting_core_affinity; | 1177 | if (cpu_family == CPU_HW_PSTATE) |
| 1178 | pol->cpus = cpumask_of_cpu(pol->cpu); | ||
| 1179 | else | ||
| 1180 | pol->cpus = per_cpu(cpu_core_map, pol->cpu); | ||
| 1213 | data->available_cores = &(pol->cpus); | 1181 | data->available_cores = &(pol->cpus); |
| 1214 | 1182 | ||
| 1215 | /* Take a crude guess here. | 1183 | /* Take a crude guess here. |
| @@ -1332,7 +1300,6 @@ static int __cpuinit powernowk8_init(void) | |||
| 1332 | } | 1300 | } |
| 1333 | 1301 | ||
| 1334 | if (supported_cpus == num_online_cpus()) { | 1302 | if (supported_cpus == num_online_cpus()) { |
| 1335 | powernow_k8_cpu_preinit_acpi(); | ||
| 1336 | printk(KERN_INFO PFX "Found %d %s " | 1303 | printk(KERN_INFO PFX "Found %d %s " |
| 1337 | "processors (%d cpu cores) (" VERSION ")\n", | 1304 | "processors (%d cpu cores) (" VERSION ")\n", |
| 1338 | num_online_nodes(), | 1305 | num_online_nodes(), |
| @@ -1349,10 +1316,6 @@ static void __exit powernowk8_exit(void) | |||
| 1349 | dprintk("exit\n"); | 1316 | dprintk("exit\n"); |
| 1350 | 1317 | ||
| 1351 | cpufreq_unregister_driver(&cpufreq_amd64_driver); | 1318 | cpufreq_unregister_driver(&cpufreq_amd64_driver); |
| 1352 | |||
| 1353 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | ||
| 1354 | free_percpu(acpi_perf_data); | ||
| 1355 | #endif | ||
| 1356 | } | 1319 | } |
| 1357 | 1320 | ||
| 1358 | MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>"); | 1321 | MODULE_AUTHOR("Paul Devriendt <paul.devriendt@amd.com> and Mark Langsdorf <mark.langsdorf@amd.com>"); |
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h index a62612cd4be8..ab48cfed4d96 100644 --- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.h +++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.h | |||
| @@ -33,13 +33,12 @@ struct powernow_k8_data { | |||
| 33 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI | 33 | #ifdef CONFIG_X86_POWERNOW_K8_ACPI |
| 34 | /* the acpi table needs to be kept. it's only available if ACPI was | 34 | /* the acpi table needs to be kept. it's only available if ACPI was |
| 35 | * used to determine valid frequency/vid/fid states */ | 35 | * used to determine valid frequency/vid/fid states */ |
| 36 | struct acpi_processor_performance *acpi_data; | 36 | struct acpi_processor_performance acpi_data; |
| 37 | #endif | 37 | #endif |
| 38 | /* we need to keep track of associated cores, but let cpufreq | 38 | /* we need to keep track of associated cores, but let cpufreq |
| 39 | * handle hotplug events - so just point at cpufreq pol->cpus | 39 | * handle hotplug events - so just point at cpufreq pol->cpus |
| 40 | * structure */ | 40 | * structure */ |
| 41 | cpumask_t *available_cores; | 41 | cpumask_t *available_cores; |
| 42 | cpumask_t starting_core_affinity; | ||
| 43 | }; | 42 | }; |
| 44 | 43 | ||
| 45 | 44 | ||
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 3fd7a67bb06a..e710a21bb6e8 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
| @@ -134,23 +134,6 @@ static void __cpuinit set_cx86_memwb(void) | |||
| 134 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); | 134 | setCx86(CX86_CCR2, getCx86(CX86_CCR2) | 0x14); |
| 135 | } | 135 | } |
| 136 | 136 | ||
| 137 | static void __cpuinit set_cx86_inc(void) | ||
| 138 | { | ||
| 139 | unsigned char ccr3; | ||
| 140 | |||
| 141 | printk(KERN_INFO "Enable Incrementor on Cyrix/NSC processor.\n"); | ||
| 142 | |||
| 143 | ccr3 = getCx86(CX86_CCR3); | ||
| 144 | setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */ | ||
| 145 | /* PCR1 -- Performance Control */ | ||
| 146 | /* Incrementor on, whatever that is */ | ||
| 147 | setCx86(CX86_PCR1, getCx86(CX86_PCR1) | 0x02); | ||
| 148 | /* PCR0 -- Performance Control */ | ||
| 149 | /* Incrementor Margin 10 */ | ||
| 150 | setCx86(CX86_PCR0, getCx86(CX86_PCR0) | 0x04); | ||
| 151 | setCx86(CX86_CCR3, ccr3); /* disable MAPEN */ | ||
| 152 | } | ||
| 153 | |||
| 154 | /* | 137 | /* |
| 155 | * Configure later MediaGX and/or Geode processor. | 138 | * Configure later MediaGX and/or Geode processor. |
| 156 | */ | 139 | */ |
| @@ -174,7 +157,6 @@ static void __cpuinit geode_configure(void) | |||
| 174 | 157 | ||
| 175 | set_cx86_memwb(); | 158 | set_cx86_memwb(); |
| 176 | set_cx86_reorder(); | 159 | set_cx86_reorder(); |
| 177 | set_cx86_inc(); | ||
| 178 | 160 | ||
| 179 | local_irq_restore(flags); | 161 | local_irq_restore(flags); |
| 180 | } | 162 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c index 65a339678ece..726a5fcdf341 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_64.c | |||
| @@ -759,6 +759,7 @@ static struct sysdev_class mce_sysclass = { | |||
| 759 | }; | 759 | }; |
| 760 | 760 | ||
| 761 | DEFINE_PER_CPU(struct sys_device, device_mce); | 761 | DEFINE_PER_CPU(struct sys_device, device_mce); |
| 762 | void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu) __cpuinitdata; | ||
| 762 | 763 | ||
| 763 | /* Why are there no generic functions for this? */ | 764 | /* Why are there no generic functions for this? */ |
| 764 | #define ACCESSOR(name, var, start) \ | 765 | #define ACCESSOR(name, var, start) \ |
| @@ -883,9 +884,13 @@ static int __cpuinit mce_cpu_callback(struct notifier_block *nfb, | |||
| 883 | case CPU_ONLINE: | 884 | case CPU_ONLINE: |
| 884 | case CPU_ONLINE_FROZEN: | 885 | case CPU_ONLINE_FROZEN: |
| 885 | mce_create_device(cpu); | 886 | mce_create_device(cpu); |
| 887 | if (threshold_cpu_callback) | ||
| 888 | threshold_cpu_callback(action, cpu); | ||
| 886 | break; | 889 | break; |
| 887 | case CPU_DEAD: | 890 | case CPU_DEAD: |
| 888 | case CPU_DEAD_FROZEN: | 891 | case CPU_DEAD_FROZEN: |
| 892 | if (threshold_cpu_callback) | ||
| 893 | threshold_cpu_callback(action, cpu); | ||
| 889 | mce_remove_device(cpu); | 894 | mce_remove_device(cpu); |
| 890 | break; | 895 | break; |
| 891 | } | 896 | } |
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c index 88736cadbaa6..5eb390a4b2e9 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c | |||
| @@ -628,6 +628,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank) | |||
| 628 | deallocate_threshold_block(cpu, bank); | 628 | deallocate_threshold_block(cpu, bank); |
| 629 | 629 | ||
| 630 | free_out: | 630 | free_out: |
| 631 | kobject_del(b->kobj); | ||
| 631 | kobject_put(b->kobj); | 632 | kobject_put(b->kobj); |
| 632 | kfree(b); | 633 | kfree(b); |
| 633 | per_cpu(threshold_banks, cpu)[bank] = NULL; | 634 | per_cpu(threshold_banks, cpu)[bank] = NULL; |
| @@ -645,14 +646,11 @@ static void threshold_remove_device(unsigned int cpu) | |||
| 645 | } | 646 | } |
| 646 | 647 | ||
| 647 | /* get notified when a cpu comes on/off */ | 648 | /* get notified when a cpu comes on/off */ |
| 648 | static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb, | 649 | static void __cpuinit amd_64_threshold_cpu_callback(unsigned long action, |
| 649 | unsigned long action, void *hcpu) | 650 | unsigned int cpu) |
| 650 | { | 651 | { |
| 651 | /* cpu was unsigned int to begin with */ | ||
| 652 | unsigned int cpu = (unsigned long)hcpu; | ||
| 653 | |||
| 654 | if (cpu >= NR_CPUS) | 652 | if (cpu >= NR_CPUS) |
| 655 | goto out; | 653 | return; |
| 656 | 654 | ||
| 657 | switch (action) { | 655 | switch (action) { |
| 658 | case CPU_ONLINE: | 656 | case CPU_ONLINE: |
| @@ -666,14 +664,8 @@ static int __cpuinit threshold_cpu_callback(struct notifier_block *nfb, | |||
| 666 | default: | 664 | default: |
| 667 | break; | 665 | break; |
| 668 | } | 666 | } |
| 669 | out: | ||
| 670 | return NOTIFY_OK; | ||
| 671 | } | 667 | } |
| 672 | 668 | ||
| 673 | static struct notifier_block threshold_cpu_notifier __cpuinitdata = { | ||
| 674 | .notifier_call = threshold_cpu_callback, | ||
| 675 | }; | ||
| 676 | |||
| 677 | static __init int threshold_init_device(void) | 669 | static __init int threshold_init_device(void) |
| 678 | { | 670 | { |
| 679 | unsigned lcpu = 0; | 671 | unsigned lcpu = 0; |
| @@ -684,7 +676,7 @@ static __init int threshold_init_device(void) | |||
| 684 | if (err) | 676 | if (err) |
| 685 | return err; | 677 | return err; |
| 686 | } | 678 | } |
| 687 | register_hotcpu_notifier(&threshold_cpu_notifier); | 679 | threshold_cpu_callback = amd_64_threshold_cpu_callback; |
| 688 | return 0; | 680 | return 0; |
| 689 | } | 681 | } |
| 690 | 682 | ||
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c index 509bd3d9eacd..cb7d3b6a80eb 100644 --- a/arch/x86/kernel/cpu/mtrr/generic.c +++ b/arch/x86/kernel/cpu/mtrr/generic.c | |||
| @@ -379,6 +379,7 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
| 379 | unsigned long *size, mtrr_type *type) | 379 | unsigned long *size, mtrr_type *type) |
| 380 | { | 380 | { |
| 381 | unsigned int mask_lo, mask_hi, base_lo, base_hi; | 381 | unsigned int mask_lo, mask_hi, base_lo, base_hi; |
| 382 | unsigned int tmp, hi; | ||
| 382 | 383 | ||
| 383 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); | 384 | rdmsr(MTRRphysMask_MSR(reg), mask_lo, mask_hi); |
| 384 | if ((mask_lo & 0x800) == 0) { | 385 | if ((mask_lo & 0x800) == 0) { |
| @@ -392,8 +393,23 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base, | |||
| 392 | rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); | 393 | rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi); |
| 393 | 394 | ||
| 394 | /* Work out the shifted address mask. */ | 395 | /* Work out the shifted address mask. */ |
| 395 | mask_lo = size_or_mask | mask_hi << (32 - PAGE_SHIFT) | 396 | tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT; |
| 396 | | mask_lo >> PAGE_SHIFT; | 397 | mask_lo = size_or_mask | tmp; |
| 398 | /* Expand tmp with high bits to all 1s*/ | ||
| 399 | hi = fls(tmp); | ||
| 400 | if (hi > 0) { | ||
| 401 | tmp |= ~((1<<(hi - 1)) - 1); | ||
| 402 | |||
| 403 | if (tmp != mask_lo) { | ||
| 404 | static int once = 1; | ||
| 405 | |||
| 406 | if (once) { | ||
| 407 | printk(KERN_INFO "mtrr: your BIOS has set up an incorrect mask, fixing it up.\n"); | ||
| 408 | once = 0; | ||
| 409 | } | ||
| 410 | mask_lo = tmp; | ||
| 411 | } | ||
| 412 | } | ||
| 397 | 413 | ||
| 398 | /* This works correctly if size is a power of two, i.e. a | 414 | /* This works correctly if size is a power of two, i.e. a |
| 399 | contiguous range. */ | 415 | contiguous range. */ |
diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c index 6f23969c8faf..b117d7f8a564 100644 --- a/arch/x86/kernel/cpu/mtrr/main.c +++ b/arch/x86/kernel/cpu/mtrr/main.c | |||
| @@ -1496,11 +1496,8 @@ int __init mtrr_trim_uncached_memory(unsigned long end_pfn) | |||
| 1496 | 1496 | ||
| 1497 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ | 1497 | /* kvm/qemu doesn't have mtrr set right, don't trim them all */ |
| 1498 | if (!highest_pfn) { | 1498 | if (!highest_pfn) { |
| 1499 | if (!kvm_para_available()) { | 1499 | WARN(!kvm_para_available(), KERN_WARNING |
| 1500 | printk(KERN_WARNING | ||
| 1501 | "WARNING: strange, CPU MTRRs all blank?\n"); | 1500 | "WARNING: strange, CPU MTRRs all blank?\n"); |
| 1502 | WARN_ON(1); | ||
| 1503 | } | ||
| 1504 | return 0; | 1501 | return 0; |
| 1505 | } | 1502 | } |
| 1506 | 1503 | ||
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c index de7439f82b92..05cc22dbd4ff 100644 --- a/arch/x86/kernel/cpu/perfctr-watchdog.c +++ b/arch/x86/kernel/cpu/perfctr-watchdog.c | |||
| @@ -478,7 +478,13 @@ static int setup_p4_watchdog(unsigned nmi_hz) | |||
| 478 | perfctr_msr = MSR_P4_IQ_PERFCTR1; | 478 | perfctr_msr = MSR_P4_IQ_PERFCTR1; |
| 479 | evntsel_msr = MSR_P4_CRU_ESCR0; | 479 | evntsel_msr = MSR_P4_CRU_ESCR0; |
| 480 | cccr_msr = MSR_P4_IQ_CCCR1; | 480 | cccr_msr = MSR_P4_IQ_CCCR1; |
| 481 | cccr_val = P4_CCCR_OVF_PMI1 | P4_CCCR_ESCR_SELECT(4); | 481 | |
| 482 | /* Pentium 4 D processors don't support P4_CCCR_OVF_PMI1 */ | ||
| 483 | if (boot_cpu_data.x86_model == 4 && boot_cpu_data.x86_mask == 4) | ||
| 484 | cccr_val = P4_CCCR_OVF_PMI0; | ||
| 485 | else | ||
| 486 | cccr_val = P4_CCCR_OVF_PMI1; | ||
| 487 | cccr_val |= P4_CCCR_ESCR_SELECT(4); | ||
| 482 | } | 488 | } |
| 483 | 489 | ||
| 484 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) | 490 | evntsel = P4_ESCR_EVENT_SELECT(0x3F) |
