Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--	arch/x86/kernel/cpu/addon_cpuid_features.c	|  34
-rw-r--r--	arch/x86/kernel/cpu/common.c			| 204
-rw-r--r--	arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c	|  35
-rw-r--r--	arch/x86/kernel/cpu/intel.c			|  25
-rw-r--r--	arch/x86/kernel/cpu/intel_cacheinfo.c		|  63
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_amd_64.c		|  21
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_intel_64.c	|   1
-rw-r--r--	arch/x86/kernel/cpu/mtrr/generic.c		|  12
8 files changed, 219 insertions(+), 176 deletions(-)
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 2cf23634b6d9..4e581fdc0a5a 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -143,37 +143,3 @@ void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
 	return;
 #endif
 }
-
-#ifdef CONFIG_X86_PAT
-void __cpuinit validate_pat_support(struct cpuinfo_x86 *c)
-{
-	if (!cpu_has_pat)
-		pat_disable("PAT not supported by CPU.");
-
-	switch (c->x86_vendor) {
-	case X86_VENDOR_INTEL:
-		/*
-		 * There is a known erratum on Pentium III and Core Solo
-		 * and Core Duo CPUs.
-		 * " Page with PAT set to WC while associated MTRR is UC
-		 * may consolidate to UC "
-		 * Because of this erratum, it is better to stick with
-		 * setting WC in MTRR rather than using PAT on these CPUs.
-		 *
-		 * Enable PAT WC only on P4, Core 2 or later CPUs.
-		 */
-		if (c->x86 > 0x6 || (c->x86 == 6 && c->x86_model >= 15))
-			return;
-
-		pat_disable("PAT WC disabled due to known CPU erratum.");
-		return;
-
-	case X86_VENDOR_AMD:
-	case X86_VENDOR_CENTAUR:
-	case X86_VENDOR_TRANSMETA:
-		return;
-	}
-
-	pat_disable("PAT disabled. Not yet verified on this CPU type.");
-}
-#endif
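
The removal above is a relocation, not a deletion: the vendor switch collapses into the X86_FEATURE_PAT capability bit, which the Intel-specific quirk added later in this diff clears for the erratum-affected models. For reference, PAT support is advertised in CPUID.01H:EDX bit 16; a minimal userspace sketch (illustrative only, not part of this commit) that reads the same bit:

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* CPUID leaf 1: feature flags; PAT is EDX bit 16 */
	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;
	printf("PAT %ssupported\n", (edx & (1u << 16)) ? "" : "not ");
	return 0;
}
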
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 83492b1f93b1..275e2cb43b91 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -21,14 +21,16 @@
 #include <asm/asm.h>
 #include <asm/numa.h>
 #include <asm/smp.h>
+#include <asm/cpu.h>
+#include <asm/cpumask.h>
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/mpspec.h>
 #include <asm/apic.h>
 #include <mach_apic.h>
 #include <asm/genapic.h>
+#include <asm/uv/uv.h>
 #endif
 
-#include <asm/pda.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
 #include <asm/desc.h>
@@ -50,6 +52,15 @@ cpumask_var_t cpu_initialized_mask;
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
 
+/* correctly size the local cpu masks */
+void __init setup_cpu_local_masks(void)
+{
+	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
+	alloc_bootmem_cpumask_var(&cpu_callin_mask);
+	alloc_bootmem_cpumask_var(&cpu_callout_mask);
+	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
+}
+
 #else /* CONFIG_X86_32 */
 
 cpumask_t cpu_callin_map;
@@ -62,23 +73,23 @@ cpumask_t cpu_sibling_setup_map;
 
 static struct cpu_dev *this_cpu __cpuinitdata;
 
+DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
-/* We need valid kernel segments for data and code in long mode too
- * IRET will check the segment types  kkeil 2000/10/28
- * Also sysret mandates a special GDT layout
- */
-/* The TLS descriptors are currently at a different place compared to i386.
-   Hopefully nobody expects them at a fixed place (Wine?) */
-DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
+	/*
+	 * We need valid kernel segments for data and code in long mode too
+	 * IRET will check the segment types  kkeil 2000/10/28
+	 * Also sysret mandates a special GDT layout
+	 *
+	 * The TLS descriptors are currently at a different place compared to i386.
+	 * Hopefully nobody expects them at a fixed place (Wine?)
+	 */
 	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
 	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
-} };
 #else
-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
@@ -110,9 +121,9 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
 	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
-	[GDT_ENTRY_PERCPU] = { { { 0x00000000, 0x00000000 } } },
-} };
+	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
 #endif
+} };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
 #ifdef CONFIG_X86_32
@@ -213,6 +224,49 @@ static inline void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 #endif
 
 /*
+ * Some CPU features depend on higher CPUID levels, which may not always
+ * be available due to CPUID level capping or broken virtualization
+ * software.  Add those features to this table to auto-disable them.
+ */
+struct cpuid_dependent_feature {
+	u32 feature;
+	u32 level;
+};
+static const struct cpuid_dependent_feature __cpuinitconst
+cpuid_dependent_features[] = {
+	{ X86_FEATURE_MWAIT,	0x00000005 },
+	{ X86_FEATURE_DCA,	0x00000009 },
+	{ X86_FEATURE_XSAVE,	0x0000000d },
+	{ 0, 0 }
+};
+
+static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+{
+	const struct cpuid_dependent_feature *df;
+	for (df = cpuid_dependent_features; df->feature; df++) {
+		/*
+		 * Note: cpuid_level is set to -1 if unavailable, but
+		 * extended_cpuid_level is set to 0 if unavailable
+		 * and the legitimate extended levels are all negative
+		 * when signed; hence the weird messing around with
+		 * signs here...
+		 */
+		if (cpu_has(c, df->feature) &&
+		    ((s32)df->level < 0 ?
+		     (u32)df->level > (u32)c->extended_cpuid_level :
+		     (s32)df->level > (s32)c->cpuid_level)) {
+			clear_cpu_cap(c, df->feature);
+			if (warn)
+				printk(KERN_WARNING
+				       "CPU: CPU feature %s disabled "
+				       "due to lack of CPUID level 0x%x\n",
+				       x86_cap_flags[df->feature],
+				       df->level);
+		}
+	}
+}
+
+/*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
  * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
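
The sign games in the comparison exist because basic CPUID levels are small positive numbers while extended levels (0x8000xxxx) are negative when viewed as s32, so each df->level is matched against cpuid_level or extended_cpuid_level accordingly. A self-contained userspace analogue of the basic-leaf check (illustrative, using GCC's <cpuid.h>; not kernel code):

#include <stdio.h>
#include <cpuid.h>

struct dep { const char *name; unsigned int level; };

int main(void)
{
	static const struct dep deps[] = {
		{ "MWAIT", 0x05 }, { "DCA", 0x09 }, { "XSAVE", 0x0d },
	};
	unsigned int max_level = __get_cpuid_max(0, NULL);
	unsigned int i;

	/* trust a feature only if its required CPUID leaf is reported */
	for (i = 0; i < sizeof(deps) / sizeof(deps[0]); i++)
		printf("%-5s needs CPUID level 0x%02x: %s\n",
		       deps[i].name, deps[i].level,
		       max_level >= deps[i].level ? "ok" : "missing");
	return 0;
}
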
@@ -247,12 +301,17 @@ __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 void switch_to_new_gdt(void)
 {
 	struct desc_ptr gdt_descr;
+	int cpu = smp_processor_id();
 
-	gdt_descr.address = (long)get_cpu_gdt_table(smp_processor_id());
+	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
 	gdt_descr.size = GDT_SIZE - 1;
 	load_gdt(&gdt_descr);
+	/* Reload the per-cpu base */
 #ifdef CONFIG_X86_32
-	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
+	loadsegment(fs, __KERNEL_PERCPU);
+#else
+	loadsegment(gs, 0);
+	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
 #endif
 }
 
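
On 64-bit, "reload the per-cpu base" means repointing %gs at this CPU's per-cpu area: a null selector is loaded first, and the actual base is programmed through the IA32_GS_BASE MSR. A hedged kernel-style sketch of that idiom (function name invented for illustration):

/* Illustrative sketch, not from this commit. */
static void set_percpu_base(unsigned long base)
{
	loadsegment(gs, 0);		/* null selector; base comes from MSR */
	wrmsrl(MSR_GS_BASE, base);	/* %gs-relative loads now hit 'base' */
}
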
@@ -570,11 +629,10 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 	if (this_cpu->c_early_init)
 		this_cpu->c_early_init(c);
 
-	validate_pat_support(c);
-
 #ifdef CONFIG_SMP
 	c->cpu_index = boot_cpu_id;
 #endif
+	filter_cpuid_features(c, false);
 }
 
 void __init early_cpu_init(void)
@@ -708,6 +766,9 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	 * we do "generic changes."
 	 */
 
+	/* Filter out anything that depends on CPUID levels we don't have */
+	filter_cpuid_features(c, true);
+
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
 		char *p;
@@ -877,54 +938,26 @@ static __init int setup_disablecpuid(char *arg)
 __setup("clearcpuid=", setup_disablecpuid);
 
 #ifdef CONFIG_X86_64
-struct x8664_pda **_cpu_pda __read_mostly;
-EXPORT_SYMBOL(_cpu_pda);
-
 struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
-static char boot_cpu_stack[IRQSTACKSIZE] __page_aligned_bss;
-
-void __cpuinit pda_init(int cpu)
-{
-	struct x8664_pda *pda = cpu_pda(cpu);
-
-	/* Setup up data that may be needed in __get_free_pages early */
-	loadsegment(fs, 0);
-	loadsegment(gs, 0);
-	/* Memory clobbers used to order PDA accessed */
-	mb();
-	wrmsrl(MSR_GS_BASE, pda);
-	mb();
-
-	pda->cpunumber = cpu;
-	pda->irqcount = -1;
-	pda->kernelstack = (unsigned long)stack_thread_info() -
-				 PDA_STACKOFFSET + THREAD_SIZE;
-	pda->active_mm = &init_mm;
-	pda->mmu_state = 0;
-
-	if (cpu == 0) {
-		/* others are initialized in smpboot.c */
-		pda->pcurrent = &init_task;
-		pda->irqstackptr = boot_cpu_stack;
-		pda->irqstackptr += IRQSTACKSIZE - 64;
-	} else {
-		if (!pda->irqstackptr) {
-			pda->irqstackptr = (char *)
-				__get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
-			if (!pda->irqstackptr)
-				panic("cannot allocate irqstack for cpu %d",
-				      cpu);
-			pda->irqstackptr += IRQSTACKSIZE - 64;
-		}
-
-		if (pda->nodenumber == 0 && cpu_to_node(cpu) != NUMA_NO_NODE)
-			pda->nodenumber = cpu_to_node(cpu);
-	}
-}
+DEFINE_PER_CPU_FIRST(union irq_stack_union,
+		     irq_stack_union) __aligned(PAGE_SIZE);
+#ifdef CONFIG_SMP
+DEFINE_PER_CPU(char *, irq_stack_ptr);	/* will be set during per cpu init */
+#else
+DEFINE_PER_CPU(char *, irq_stack_ptr) =
+	per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
+#endif
 
-static char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ +
-					   DEBUG_STKSZ] __page_aligned_bss;
+DEFINE_PER_CPU(unsigned long, kernel_stack) =
+	(unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
+EXPORT_PER_CPU_SYMBOL(kernel_stack);
+
+DEFINE_PER_CPU(unsigned int, irq_count) = -1;
+
+static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
+	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
+	__aligned(PAGE_SIZE);
 
 extern asmlinkage void ignore_sysret(void);
 
@@ -982,15 +1015,14 @@ void __cpuinit cpu_init(void)
 	struct tss_struct *t = &per_cpu(init_tss, cpu);
 	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
 	unsigned long v;
-	char *estacks = NULL;
 	struct task_struct *me;
 	int i;
 
-	/* CPU 0 is initialised in head64.c */
-	if (cpu != 0)
-		pda_init(cpu);
-	else
-		estacks = boot_exception_stacks;
+#ifdef CONFIG_NUMA
+	if (cpu != 0 && percpu_read(node_number) == 0 &&
+	    cpu_to_node(cpu) != NUMA_NO_NODE)
+		percpu_write(node_number, cpu_to_node(cpu));
+#endif
 
 	me = current;
 
@@ -1007,6 +1039,8 @@ void __cpuinit cpu_init(void)
 	 */
 
 	switch_to_new_gdt();
+	loadsegment(fs, 0);
+
 	load_idt((const struct desc_ptr *)&idt_descr);
 
 	memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
@@ -1024,18 +1058,13 @@ void __cpuinit cpu_init(void)
 	 * set up and load the per-CPU TSS
 	 */
 	if (!orig_ist->ist[0]) {
-		static const unsigned int order[N_EXCEPTION_STACKS] = {
-		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
-		  [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
+		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
+		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+		  [DEBUG_STACK - 1] = DEBUG_STKSZ
 		};
+		char *estacks = per_cpu(exception_stacks, cpu);
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			if (cpu) {
-				estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
-				if (!estacks)
-					panic("Cannot allocate exception "
-					      "stack %ld %d\n", v, cpu);
-			}
-			estacks += PAGE_SIZE << order[v];
+			estacks += sizes[v];
 			orig_ist->ist[v] = t->x86_tss.ist[v] =
 					(unsigned long)estacks;
 		}
@@ -1069,22 +1098,19 @@ void __cpuinit cpu_init(void)
 	 */
 	if (kgdb_connected && arch_kgdb_ops.correct_hw_break)
 		arch_kgdb_ops.correct_hw_break();
-	else {
+	else
 #endif
-	/*
-	 * Clear all 6 debug registers:
-	 */
-
-	set_debugreg(0UL, 0);
-	set_debugreg(0UL, 1);
-	set_debugreg(0UL, 2);
-	set_debugreg(0UL, 3);
-	set_debugreg(0UL, 6);
-	set_debugreg(0UL, 7);
-#ifdef CONFIG_KGDB
-	/* If the kgdb is connected no debug regs should be altered. */
+	{
+		/*
+		 * Clear all 6 debug registers:
+		 */
+		set_debugreg(0UL, 0);
+		set_debugreg(0UL, 1);
+		set_debugreg(0UL, 2);
+		set_debugreg(0UL, 3);
+		set_debugreg(0UL, 6);
+		set_debugreg(0UL, 7);
 	}
-#endif
 
 	fpu_init();
 
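
The pda_init() removal above is the heart of this series: fields that lived in struct x8664_pda become ordinary per-cpu variables (irq_stack_ptr, kernel_stack, irq_count, node_number), addressed directly through %gs. A hedged kernel-style sketch of the replacement access pattern (names illustrative, assuming this series' percpu_read()/percpu_write() helpers):

/* Illustrative sketch, not from this commit: a former PDA field as a
 * plain per-cpu variable; each accessor compiles to one %gs-relative
 * memory operation. */
DEFINE_PER_CPU(int, node_number);

static void set_this_cpu_node(int node)
{
	percpu_write(node_number, node);	/* was: pda->nodenumber = node */
}

static int this_cpu_node(void)
{
	return percpu_read(node_number);	/* was: read_pda(nodenumber) */
}
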
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 6f11e029e8c5..4b1c319d30c3 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -145,13 +145,14 @@ typedef union {
 
 struct drv_cmd {
 	unsigned int type;
-	cpumask_var_t mask;
+	const struct cpumask *mask;
 	drv_addr_union addr;
 	u32 val;
 };
 
-static void do_drv_read(struct drv_cmd *cmd)
+static long do_drv_read(void *_cmd)
 {
+	struct drv_cmd *cmd = _cmd;
 	u32 h;
 
 	switch (cmd->type) {
@@ -166,10 +167,12 @@ static void do_drv_read(struct drv_cmd *cmd)
 	default:
 		break;
 	}
+	return 0;
 }
 
-static void do_drv_write(struct drv_cmd *cmd)
+static long do_drv_write(void *_cmd)
 {
+	struct drv_cmd *cmd = _cmd;
 	u32 lo, hi;
 
 	switch (cmd->type) {
@@ -186,30 +189,23 @@ static void do_drv_write(struct drv_cmd *cmd)
 	default:
 		break;
 	}
+	return 0;
 }
 
 static void drv_read(struct drv_cmd *cmd)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
 	cmd->val = 0;
 
-	set_cpus_allowed_ptr(current, cmd->mask);
-	do_drv_read(cmd);
-	set_cpus_allowed_ptr(current, &saved_mask);
+	work_on_cpu(cpumask_any(cmd->mask), do_drv_read, cmd);
 }
 
 static void drv_write(struct drv_cmd *cmd)
 {
-	cpumask_t saved_mask = current->cpus_allowed;
 	unsigned int i;
 
 	for_each_cpu(i, cmd->mask) {
-		set_cpus_allowed_ptr(current, cpumask_of(i));
-		do_drv_write(cmd);
+		work_on_cpu(i, do_drv_write, cmd);
 	}
-
-	set_cpus_allowed_ptr(current, &saved_mask);
-	return;
 }
 
 static u32 get_cur_val(const struct cpumask *mask)
@@ -235,8 +231,7 @@ static u32 get_cur_val(const struct cpumask *mask)
 		return 0;
 	}
 
-	cpumask_copy(cmd.mask, mask);
-
+	cmd.mask = mask;
 	drv_read(&cmd);
 
 	dprintk("get_cur_val = %u\n", cmd.val);
@@ -368,7 +363,7 @@ static unsigned int get_cur_freq_on_cpu(unsigned int cpu)
 	return freq;
 }
 
-static unsigned int check_freqs(const cpumask_t *mask, unsigned int freq,
+static unsigned int check_freqs(const struct cpumask *mask, unsigned int freq,
 				struct acpi_cpufreq_data *data)
 {
 	unsigned int cur_freq;
@@ -403,9 +398,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 		return -ENODEV;
 	}
 
-	if (unlikely(!alloc_cpumask_var(&cmd.mask, GFP_KERNEL)))
-		return -ENOMEM;
-
 	perf = data->acpi_data;
 	result = cpufreq_frequency_table_target(policy,
 						data->freq_table,
@@ -450,9 +442,9 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 
 	/* cpufreq holds the hotplug lock, so we are safe from here on */
 	if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY)
-		cpumask_and(cmd.mask, cpu_online_mask, policy->cpus);
+		cmd.mask = policy->cpus;
 	else
-		cpumask_copy(cmd.mask, cpumask_of(policy->cpu));
+		cmd.mask = cpumask_of(policy->cpu);
 
 	freqs.old = perf->states[perf->state].core_frequency * 1000;
 	freqs.new = data->freq_table[next_state].frequency;
@@ -479,7 +471,6 @@ static int acpi_cpufreq_target(struct cpufreq_policy *policy,
 	perf->state = next_perf_state;
 
 out:
-	free_cpumask_var(cmd.mask);
 	return result;
 }
 
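
The driver no longer bounces the calling task between CPUs with set_cpus_allowed_ptr(); work_on_cpu() runs the callback on the target CPU's workqueue and hands back its long return value. A hedged kernel-style sketch of the calling convention (the MSR here is an arbitrary example, not what this driver reads):

/* Illustrative sketch, not from this commit. */
static long read_perf_status(void *unused)
{
	u32 lo, hi;

	rdmsr(MSR_IA32_PERF_STATUS, lo, hi);	/* executes on the target CPU */
	return lo;
}

static u32 sample_perf_status(int cpu)
{
	/* blocks until the callback has completed on @cpu */
	return (u32)work_on_cpu(cpu, read_perf_status, NULL);
}
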
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 8ea6929e974c..5deefae9064d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -29,6 +29,19 @@
 
 static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 {
+	/* Unmask CPUID levels if masked: */
+	if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) {
+		u64 misc_enable;
+
+		rdmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+
+		if (misc_enable & MSR_IA32_MISC_ENABLE_LIMIT_CPUID) {
+			misc_enable &= ~MSR_IA32_MISC_ENABLE_LIMIT_CPUID;
+			wrmsrl(MSR_IA32_MISC_ENABLE, misc_enable);
+			c->cpuid_level = cpuid_eax(0);
+		}
+	}
+
 	if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
 	    (c->x86 == 0x6 && c->x86_model >= 0x0e))
 		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
@@ -50,6 +63,18 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
 	}
 
+	/*
+	 * There is a known erratum on Pentium III and Core Solo
+	 * and Core Duo CPUs.
+	 * " Page with PAT set to WC while associated MTRR is UC
+	 * may consolidate to UC "
+	 * Because of this erratum, it is better to stick with
+	 * setting WC in MTRR rather than using PAT on these CPUs.
+	 *
+	 * Enable PAT WC only on P4, Core 2 or later CPUs.
+	 */
+	if (c->x86 == 6 && c->x86_model < 15)
+		clear_cpu_cap(c, X86_FEATURE_PAT);
 }
 
 #ifdef CONFIG_X86_32
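
The unmasking above matters because some BIOSes set the "limit CPUID maxval" bit (bit 22 of IA32_MISC_ENABLE, MSR 0x1a0), which caps CPUID.0H.EAX and would needlessly trip the new CPUID-level filter. A userspace sketch that inspects the same bit via the msr driver (requires root and CONFIG_X86_MSR; illustrative only, not kernel code):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t misc_enable;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* the msr driver maps the file offset to the MSR number */
	if (fd < 0 || pread(fd, &misc_enable, 8, 0x1a0) != 8) {
		perror("msr");
		return 1;
	}
	printf("CPUID level limiting is %s\n",
	       (misc_enable & (1ULL << 22)) ? "on" : "off");
	return 0;
}
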
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 48533d77be78..58527a9fc404 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -132,7 +132,16 @@ struct _cpuid4_info {
 	union _cpuid4_leaf_ecx ecx;
 	unsigned long size;
 	unsigned long can_disable;
-	cpumask_t shared_cpu_map;	/* future?: only cpus/node is needed */
+	DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
+};
+
+/* subset of above _cpuid4_info w/o shared_cpu_map */
+struct _cpuid4_info_regs {
+	union _cpuid4_leaf_eax eax;
+	union _cpuid4_leaf_ebx ebx;
+	union _cpuid4_leaf_ecx ecx;
+	unsigned long size;
+	unsigned long can_disable;
 };
 
 #ifdef CONFIG_PCI
@@ -263,7 +272,7 @@ amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 }
 
 static void __cpuinit
-amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
+amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
 {
 	if (index < 3)
 		return;
@@ -271,7 +280,8 @@ amd_check_l3_disable(int index, struct _cpuid4_info *this_leaf)
 }
 
 static int
-__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+__cpuinit cpuid4_cache_lookup_regs(int index,
+				   struct _cpuid4_info_regs *this_leaf)
 {
 	union _cpuid4_leaf_eax eax;
 	union _cpuid4_leaf_ebx ebx;
@@ -299,6 +309,15 @@ __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
 	return 0;
 }
 
+static int
+__cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
+{
+	struct _cpuid4_info_regs *leaf_regs =
+		(struct _cpuid4_info_regs *)this_leaf;
+
+	return cpuid4_cache_lookup_regs(index, leaf_regs);
+}
+
 static int __cpuinit find_num_cache_leaves(void)
 {
 	unsigned int eax, ebx, ecx, edx;
@@ -338,11 +357,10 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 	 * parameters cpuid leaf to find the cache details
 	 */
 	for (i = 0; i < num_cache_leaves; i++) {
-		struct _cpuid4_info this_leaf;
-
+		struct _cpuid4_info_regs this_leaf;
 		int retval;
 
-		retval = cpuid4_cache_lookup(i, &this_leaf);
+		retval = cpuid4_cache_lookup_regs(i, &this_leaf);
 		if (retval >= 0) {
 			switch(this_leaf.eax.split.level) {
 			case 1:
@@ -491,17 +509,20 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 	num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
 	if (num_threads_sharing == 1)
-		cpu_set(cpu, this_leaf->shared_cpu_map);
+		cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
 	else {
 		index_msb = get_count_order(num_threads_sharing);
 
 		for_each_online_cpu(i) {
 			if (cpu_data(i).apicid >> index_msb ==
 			    c->apicid >> index_msb) {
-				cpu_set(i, this_leaf->shared_cpu_map);
+				cpumask_set_cpu(i,
+					to_cpumask(this_leaf->shared_cpu_map));
 				if (i != cpu && per_cpu(cpuid4_info, i)) {
-					sibling_leaf = CPUID4_INFO_IDX(i, index);
-					cpu_set(cpu, sibling_leaf->shared_cpu_map);
+					sibling_leaf =
+						CPUID4_INFO_IDX(i, index);
+					cpumask_set_cpu(cpu, to_cpumask(
+						sibling_leaf->shared_cpu_map));
 				}
 			}
 		}
@@ -513,9 +534,10 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	int sibling;
 
 	this_leaf = CPUID4_INFO_IDX(cpu, index);
-	for_each_cpu_mask_nr(sibling, this_leaf->shared_cpu_map) {
+	for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
 		sibling_leaf = CPUID4_INFO_IDX(sibling, index);
-		cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+		cpumask_clear_cpu(cpu,
+				  to_cpumask(sibling_leaf->shared_cpu_map));
 	}
 }
 #else
@@ -620,8 +642,9 @@ static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
 	int n = 0;
 
 	if (len > 1) {
-		cpumask_t *mask = &this_leaf->shared_cpu_map;
+		const struct cpumask *mask;
 
+		mask = to_cpumask(this_leaf->shared_cpu_map);
 		n = type?
 			cpulist_scnprintf(buf, len-2, mask) :
 			cpumask_scnprintf(buf, len-2, mask);
@@ -684,7 +707,8 @@ static struct pci_dev *get_k8_northbridge(int node)
 
 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	ssize_t ret = 0;
 	int i;
@@ -718,7 +742,8 @@ static ssize_t
 store_cache_disable(struct _cpuid4_info *this_leaf, const char *buf,
 		    size_t count)
 {
-	int node = cpu_to_node(first_cpu(this_leaf->shared_cpu_map));
+	const struct cpumask *mask = to_cpumask(this_leaf->shared_cpu_map);
+	int node = cpu_to_node(cpumask_first(mask));
 	struct pci_dev *dev = NULL;
 	unsigned int ret, index, val;
 
@@ -863,7 +888,7 @@ err_out:
 	return -ENOMEM;
 }
 
-static cpumask_t cache_dev_map = CPU_MASK_NONE;
+static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
 
 /* Add/Remove cache interface for CPU device */
 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
@@ -903,7 +928,7 @@ static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
 		}
 		kobject_uevent(&(this_object->kobj), KOBJ_ADD);
 	}
-	cpu_set(cpu, cache_dev_map);
+	cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
 
 	kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
 	return 0;
@@ -916,9 +941,9 @@ static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
 
 	if (per_cpu(cpuid4_info, cpu) == NULL)
 		return;
-	if (!cpu_isset(cpu, cache_dev_map))
+	if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
 		return;
-	cpu_clear(cpu, cache_dev_map);
+	cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
 
 	for (i = 0; i < num_cache_leaves; i++)
 		kobject_put(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
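
The recurring pattern in this file is replacing embedded cpumask_t values with DECLARE_BITMAP() plus to_cpumask(), so the data stays a plain bitmap while the new struct cpumask * operators work on it. A hedged kernel-style sketch of the idiom (names invented for illustration):

/* Illustrative sketch, not from this commit. */
static DECLARE_BITMAP(tracked_cpus, NR_CPUS);

static void track_cpu(int cpu)
{
	cpumask_set_cpu(cpu, to_cpumask(tracked_cpus));
}

static bool cpu_is_tracked(int cpu)
{
	return cpumask_test_cpu(cpu, to_cpumask(tracked_cpus));
}
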
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
index 8ae8c4ff094d..4772e91e8246 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -67,7 +67,7 @@ static struct threshold_block threshold_defaults = {
 struct threshold_bank {
 	struct kobject *kobj;
 	struct threshold_block *blocks;
-	cpumask_t cpus;
+	cpumask_var_t cpus;
 };
 static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]);
 
@@ -481,7 +481,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
 	if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) {	/* symlink */
-		i = first_cpu(per_cpu(cpu_core_map, cpu));
+		i = cpumask_first(&per_cpu(cpu_core_map, cpu));
 
 		/* first core not up yet */
 		if (cpu_data(i).cpu_core_id)
@@ -501,7 +501,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		if (err)
 			goto out;
 
-		b->cpus = per_cpu(cpu_core_map, cpu);
+		cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 		per_cpu(threshold_banks, cpu)[bank] = b;
 		goto out;
 	}
@@ -512,15 +512,20 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		err = -ENOMEM;
 		goto out;
 	}
+	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL)) {
+		kfree(b);
+		err = -ENOMEM;
+		goto out;
+	}
 
 	b->kobj = kobject_create_and_add(name, &per_cpu(device_mce, cpu).kobj);
 	if (!b->kobj)
 		goto out_free;
 
 #ifndef CONFIG_SMP
-	b->cpus = CPU_MASK_ALL;
+	cpumask_setall(b->cpus);
 #else
-	b->cpus = per_cpu(cpu_core_map, cpu);
+	cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
 #endif
 
 	per_cpu(threshold_banks, cpu)[bank] = b;
@@ -529,7 +534,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 	if (err)
 		goto out_free;
 
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -545,6 +550,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 out_free:
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
+	free_cpumask_var(b->cpus);
 	kfree(b);
 out:
 	return err;
@@ -619,7 +625,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 #endif
 
 	/* remove all sibling symlinks before unregistering */
-	for_each_cpu_mask_nr(i, b->cpus) {
+	for_each_cpu(i, b->cpus) {
 		if (i == cpu)
 			continue;
 
@@ -632,6 +638,7 @@ static void threshold_remove_bank(unsigned int cpu, int bank)
 free_out:
 	kobject_del(b->kobj);
 	kobject_put(b->kobj);
+	free_cpumask_var(b->cpus);
 	kfree(b);
 	per_cpu(threshold_banks, cpu)[bank] = NULL;
 }
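
cpumask_var_t is the off-stack-friendly counterpart of cpumask_t: with CONFIG_CPUMASK_OFFSTACK it is heap-allocated, so allocation can fail and must be paired with free_cpumask_var(), exactly the pairing each threshold_bank gains above. A hedged kernel-style sketch of the lifecycle (struct name invented for illustration):

/* Illustrative sketch, not from this commit. */
struct bank_like {
	cpumask_var_t cpus;
};

static int bank_like_init(struct bank_like *b, unsigned int cpu)
{
	if (!alloc_cpumask_var(&b->cpus, GFP_KERNEL))
		return -ENOMEM;
	cpumask_copy(b->cpus, &per_cpu(cpu_core_map, cpu));
	return 0;
}

static void bank_like_free(struct bank_like *b)
{
	free_cpumask_var(b->cpus);	/* no-op when masks are on-stack */
}
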
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index 4b48f251fd39..5e8c79e748a6 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -7,6 +7,7 @@
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <asm/processor.h>
+#include <asm/apic.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
 #include <asm/hw_irq.h>
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index b59ddcc88cd8..0c0a455fe95c 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -33,11 +33,13 @@ u64 mtrr_tom2;
 struct mtrr_state_type mtrr_state = {};
 EXPORT_SYMBOL_GPL(mtrr_state);
 
-#undef MODULE_PARAM_PREFIX
-#define MODULE_PARAM_PREFIX "mtrr."
-
-static int mtrr_show;
-module_param_named(show, mtrr_show, bool, 0);
+static int __initdata mtrr_show;
+static int __init mtrr_debug(char *opt)
+{
+	mtrr_show = 1;
+	return 0;
+}
+early_param("mtrr.show", mtrr_debug);
 
 /*
  * Returns the effective MTRR type for the region
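
module_param_named() cannot serve mtrr_show here because the MTRR state dump happens during early CPU setup, before ordinary command-line parameter parsing runs; early_param() hooks parse_early_param() instead, so the flag is usable that early. A hedged sketch of the same pattern for a hypothetical driver (names invented for illustration):

/* Illustrative sketch, not from this commit. */
static int __initdata mydrv_debug;

static int __init mydrv_debug_setup(char *opt)
{
	mydrv_debug = 1;	/* "mydrv.debug" seen on the boot command line */
	return 0;
}
early_param("mydrv.debug", mydrv_debug_setup);
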