Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c |   2
-rw-r--r--  arch/x86/kernel/cpu/amd.c                  |   2
-rw-r--r--  arch/x86/kernel/cpu/centaur.c              |   2
-rw-r--r--  arch/x86/kernel/cpu/centaur_64.c           |   2
-rw-r--r--  arch/x86/kernel/cpu/common.c               | 390
-rw-r--r--  arch/x86/kernel/cpu/cpu.h                  |  11
-rwxr-xr-x  arch/x86/kernel/cpu/cpu_debug.c            | 118
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c                |  16
-rw-r--r--  arch/x86/kernel/cpu/intel.c                |   7
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c      |   8
-rw-r--r--  arch/x86/kernel/cpu/transmeta.c            |   2
-rw-r--r--  arch/x86/kernel/cpu/umc.c                  |   2
12 files changed, 338 insertions, 224 deletions
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 6882a735d9c0..8220ae69849d 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -29,7 +29,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 	u32 regs[4];
 	const struct cpuid_bit *cb;
 
-	static const struct cpuid_bit cpuid_bits[] = {
+	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
 		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
 		{ 0, 0, 0, 0 }
 	};
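Note on the recurring pattern in this series: __cpuinitdata marks writable data that can be discarded after CPU bring-up (when CPU hotplug is not configured), while __cpuinitconst does the same for const-qualified data, letting it land in a read-only init section. Constifying the vendor tables and switching the annotation is what most of the hunks below do. A minimal stand-alone sketch of the pattern, assuming a hypothetical quirk table (not from the patch):

	#include <linux/init.h>

	struct vendor_quirk {
		unsigned int model;	/* CPU model the quirk applies to */
		unsigned int flag;	/* capability bit to force */
	};

	/* const table referenced only from __cpuinit code: */
	static const struct vendor_quirk quirks[] __cpuinitconst = {
		{ 0x3, 0x1 },
		{ 0, 0 }	/* sentinel */
	};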
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f47df59016c5..7e4a459daa64 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -502,7 +502,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
 }
 #endif
 
-static struct cpu_dev amd_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
 	.c_vendor = "AMD",
 	.c_ident = { "AuthenticAMD" },
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 89bfdd9cacc6..983e0830f0da 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -468,7 +468,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 	return size;
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
 	.c_vendor = "Centaur",
 	.c_ident = { "CentaurHauls" },
 	.c_early_init = early_init_centaur,
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
index a1625f5a1e78..51b09c48c9c7 100644
--- a/arch/x86/kernel/cpu/centaur_64.c
+++ b/arch/x86/kernel/cpu/centaur_64.c
@@ -25,7 +25,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev centaur_cpu_dev __cpuinitconst = {
 	.c_vendor = "Centaur",
 	.c_ident = { "CentaurHauls" },
 	.c_early_init = early_init_centaur,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index f8869978bbb7..e2962cc1e27b 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,52 +1,52 @@
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kgdb.h>
-#include <linux/topology.h>
+#include <linux/percpu.h>
+#include <linux/string.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
 #include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/linkage.h>
+#include <linux/io.h>
+
+#include <asm/stackprotector.h>
 #include <asm/mmu_context.h>
+#include <asm/hypervisor.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/topology.h>
+#include <asm/cpumask.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/proto.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
 #include <asm/mtrr.h>
+#include <asm/numa.h>
+#include <asm/asm.h>
+#include <asm/cpu.h>
 #include <asm/mce.h>
+#include <asm/msr.h>
 #include <asm/pat.h>
-#include <asm/asm.h>
-#include <asm/numa.h>
 #include <asm/smp.h>
-#include <asm/cpu.h>
-#include <asm/cpumask.h>
-#include <asm/apic.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
 #endif
 
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/proto.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/hypervisor.h>
-#include <asm/stackprotector.h>
-
 #include "cpu.h"
 
 #ifdef CONFIG_X86_64
 
 /* all of these masks are initialized in setup_cpu_local_masks() */
-cpumask_var_t cpu_callin_mask;
-cpumask_var_t cpu_callout_mask;
 cpumask_var_t cpu_initialized_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_callin_mask;
 
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
@@ -62,15 +62,15 @@ void __init setup_cpu_local_masks(void)
 
 #else /* CONFIG_X86_32 */
 
-cpumask_t cpu_callin_map;
+cpumask_t cpu_sibling_setup_map;
 cpumask_t cpu_callout_map;
 cpumask_t cpu_initialized;
-cpumask_t cpu_sibling_setup_map;
+cpumask_t cpu_callin_map;
 
 #endif /* CONFIG_X86_32 */
 
 
-static struct cpu_dev *this_cpu __cpuinitdata;
+static const struct cpu_dev *this_cpu __cpuinitdata;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -79,48 +79,48 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
	 * IRET will check the segment types kkeil 2000/10/28
	 * Also sysret mandates a special GDT layout
	 *
-	 * The TLS descriptors are currently at a different place compared to i386.
+	 * TLS descriptors are currently at a different place compared to i386.
	 * Hopefully nobody expects them at a fixed place (Wine?)
	 */
	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
 #else
	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
	/*
	 * Segments used for calling PnP BIOS have byte granularity.
	 * They code segments and data segments have fixed 64k limits,
	 * the transfer segment sizes are set at run time.
	 */
	/* 32-bit code */
	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
	/* 16-bit data */
	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
	/*
	 * The APM segments have byte granularity and their bases
	 * are set at run time. All have 64k limits.
	 */
	/* 32-bit code */
	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
	/* 16-bit code */
	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
	/* data */
	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
	GDT_STACK_CANARY_INIT
 #endif
 } };
@@ -164,16 +164,17 @@ static inline int flag_is_changeable_p(u32 flag)
	 * the CPUID. Add "volatile" to not allow gcc to
	 * optimize the subsequent calls to this function.
	 */
-	asm volatile ("pushfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "movl %0,%1\n\t"
-		      "xorl %2,%0\n\t"
-		      "pushl %0\n\t"
-		      "popfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "popfl\n\t"
+	asm volatile ("pushfl \n\t"
+		      "pushfl \n\t"
+		      "popl %0 \n\t"
+		      "movl %0, %1 \n\t"
+		      "xorl %2, %0 \n\t"
+		      "pushl %0 \n\t"
+		      "popfl \n\t"
+		      "pushfl \n\t"
+		      "popl %0 \n\t"
+		      "popfl \n\t"
+
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));
 
@@ -188,18 +189,22 @@ static int __cpuinit have_cpuid_p(void)
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
-
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
+	unsigned long lo, hi;
+
+	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
+		return;
+
+	/* Disable processor serial number: */
+
+	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+	lo |= 0x200000;
+	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+
+	printk(KERN_NOTICE "CPU serial number disabled.\n");
+	clear_cpu_cap(c, X86_FEATURE_PN);
+
+	/* Disabling the serial number may affect the cpuid level */
+	c->cpuid_level = cpuid_eax(0);
 }
 
 static int __init x86_serial_nr_setup(char *s)
@@ -232,6 +237,7 @@ struct cpuid_dependent_feature {
	u32 feature;
	u32 level;
 };
+
 static const struct cpuid_dependent_feature __cpuinitconst
 cpuid_dependent_features[] = {
	{ X86_FEATURE_MWAIT, 0x00000005 },
@@ -243,7 +249,11 @@ cpuid_dependent_features[] = {
 static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
	const struct cpuid_dependent_feature *df;
+
	for (df = cpuid_dependent_features; df->feature; df++) {
+
+		if (!cpu_has(c, df->feature))
+			continue;
		/*
		 * Note: cpuid_level is set to -1 if unavailable, but
		 * extended_extended_level is set to 0 if unavailable
@@ -251,32 +261,32 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
		 * when signed; hence the weird messing around with
		 * signs here...
		 */
-		if (cpu_has(c, df->feature) &&
-		    ((s32)df->level < 0 ?
+		if (!((s32)df->level < 0 ?
		      (u32)df->level > (u32)c->extended_cpuid_level :
-		      (s32)df->level > (s32)c->cpuid_level)) {
-			clear_cpu_cap(c, df->feature);
-			if (warn)
-				printk(KERN_WARNING
-				       "CPU: CPU feature %s disabled "
-				       "due to lack of CPUID level 0x%x\n",
-				       x86_cap_flags[df->feature],
-				       df->level);
-		}
+		      (s32)df->level > (s32)c->cpuid_level))
+			continue;
+
+		clear_cpu_cap(c, df->feature);
+		if (!warn)
+			continue;
+
+		printk(KERN_WARNING
+		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+		       x86_cap_flags[df->feature], df->level);
	}
 }
 
 /*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
+ * in particular, if CPUID levels 0x80000002..4 are supported, this
+ * isn't used
  */
 
 /* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 {
-	struct cpu_model_info *info;
+	const struct cpu_model_info *info;
 
	if (c->x86_model >= 16)
		return NULL;	/* Range check */
@@ -307,8 +317,10 @@ void load_percpu_segment(int cpu)
	load_stack_canary_segment();
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
+/*
+ * Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one.
+ */
 void switch_to_new_gdt(int cpu)
 {
	struct desc_ptr gdt_descr;
@@ -321,7 +333,7 @@ void switch_to_new_gdt(int cpu)
	load_percpu_segment(cpu);
 }
 
-static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -340,7 +352,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }
 
-static struct cpu_dev __cpuinitdata default_cpu = {
+static const struct cpu_dev __cpuinitconst default_cpu = {
	.c_init = default_init,
	.c_vendor = "Unknown",
	.c_x86_vendor = X86_VENDOR_UNKNOWN,
@@ -354,22 +366,24 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
	if (c->extended_cpuid_level < 0x80000004)
		return;
 
-	v = (unsigned int *) c->x86_model_id;
+	v = (unsigned int *)c->x86_model_id;
	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
	c->x86_model_id[48] = 0;
 
-	/* Intel chips right-justify this string for some dumb reason;
-	   undo that brain damage */
+	/*
+	 * Intel chips right-justify this string for some dumb reason;
+	 * undo that brain damage:
+	 */
	p = q = &c->x86_model_id[0];
	while (*p == ' ')
		p++;
	if (p != q) {
		while (*p)
			*q++ = *p++;
		while (q <= &c->x86_model_id[48])
			*q++ = '\0';	/* Zero-pad the rest */
	}
 }
 
@@ -438,27 +452,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
	if (smp_num_siblings == 1) {
		printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+		goto out;
+	}
 
-		if (smp_num_siblings > nr_cpu_ids) {
-			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
-					smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
+	if (smp_num_siblings <= 1)
+		goto out;
+
+	if (smp_num_siblings > nr_cpu_ids) {
+		pr_warning("CPU: Unsupported number of siblings %d",
+			   smp_num_siblings);
+		smp_num_siblings = 1;
+		return;
+	}
 
	index_msb = get_count_order(smp_num_siblings);
	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
	index_msb = get_count_order(smp_num_siblings);
 
	core_bits = get_count_order(c->x86_max_cores);
 
	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
				       ((1 << core_bits) - 1);
-	}
 
 out:
	if ((c->x86_max_cores * smp_num_siblings) > 1) {
@@ -473,8 +490,8 @@ out:
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
	char *v = c->x86_vendor_id;
-	int i;
	static int printed;
+	int i;
 
	for (i = 0; i < X86_VENDOR_NUM; i++) {
		if (!cpu_devs[i])
@@ -483,6 +500,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
		    (cpu_devs[i]->c_ident[1] &&
		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+
			this_cpu = cpu_devs[i];
			c->x86_vendor = this_cpu->c_x86_vendor;
			return;
@@ -491,7 +509,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 
	if (!printed) {
		printed++;
-		printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+		printk(KERN_ERR
+		       "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+
		printk(KERN_ERR "CPU: Your system may be unstable.\n");
	}
 
@@ -511,14 +531,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 junk, tfms, cap0, misc;
+
		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
		c->x86 = (tfms >> 8) & 0xf;
		c->x86_model = (tfms >> 4) & 0xf;
		c->x86_mask = tfms & 0xf;
+
		if (c->x86 == 0xf)
			c->x86 += (tfms >> 20) & 0xff;
		if (c->x86 >= 0x6)
			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+
		if (cap0 & (1<<19)) {
			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
			c->x86_cache_alignment = c->x86_clflush_size;
@@ -534,6 +557,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
	/* Intel-defined flags: level 0x00000001 */
	if (c->cpuid_level >= 0x00000001) {
		u32 capability, excap;
+
		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
		c->x86_capability[0] = capability;
		c->x86_capability[4] = excap;
@@ -542,6 +566,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
	/* AMD-defined flags: level 0x80000001 */
	xlvl = cpuid_eax(0x80000000);
	c->extended_cpuid_level = xlvl;
+
	if ((xlvl & 0xffff0000) == 0x80000000) {
		if (xlvl >= 0x80000001) {
			c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -549,13 +574,15 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
		}
	}
 
-#ifdef CONFIG_X86_64
	if (c->extended_cpuid_level >= 0x80000008) {
		u32 eax = cpuid_eax(0x80000008);
 
		c->x86_virt_bits = (eax >> 8) & 0xff;
		c->x86_phys_bits = eax & 0xff;
	}
+#ifdef CONFIG_X86_32
+	else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36))
+		c->x86_phys_bits = 36;
 #endif
 
	if (c->extended_cpuid_level >= 0x80000007)
@@ -602,8 +629,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
	c->x86_cache_alignment = c->x86_clflush_size;
 
@@ -634,12 +665,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
 void __init early_cpu_init(void)
 {
-	struct cpu_dev **cdev;
+	const struct cpu_dev *const *cdev;
	int count = 0;
 
-	printk("KERNEL supported cpus:\n");
+	printk(KERN_INFO "KERNEL supported cpus:\n");
	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
-		struct cpu_dev *cpudev = *cdev;
+		const struct cpu_dev *cpudev = *cdev;
		unsigned int j;
 
		if (count >= X86_VENDOR_NUM)
@@ -650,7 +681,7 @@ void __init early_cpu_init(void)
		for (j = 0; j < 2; j++) {
			if (!cpudev->c_ident[j])
				continue;
-			printk(" %s %s\n", cpudev->c_vendor,
+			printk(KERN_INFO " %s %s\n", cpudev->c_vendor,
				cpudev->c_ident[j]);
		}
	}
@@ -726,9 +757,13 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
	c->x86_coreid_bits = 0;
 #ifdef CONFIG_X86_64
	c->x86_clflush_size = 64;
+	c->x86_phys_bits = 36;
+	c->x86_virt_bits = 48;
 #else
	c->cpuid_level = -1;	/* CPUID not detected */
	c->x86_clflush_size = 32;
+	c->x86_phys_bits = 32;
+	c->x86_virt_bits = 32;
 #endif
	c->x86_cache_alignment = c->x86_clflush_size;
	memset(&c->x86_capability, 0, sizeof c->x86_capability);
@@ -759,8 +794,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
	squash_the_stupid_serial_number(c);
 
	/*
-	 * The vendor-specific functions might have changed features. Now
-	 * we do "generic changes."
+	 * The vendor-specific functions might have changed features.
+	 * Now we do "generic changes."
	 */
 
	/* Filter out anything that depends on CPUID levels we don't have */
@@ -768,7 +803,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
	/* If the model name is still unset, do table lookup. */
	if (!c->x86_model_id[0]) {
-		char *p;
+		const char *p;
		p = table_lookup_model(c);
		if (p)
			strcpy(c->x86_model_id, p);
@@ -843,11 +878,11 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 }
 
 struct msr_range {
	unsigned min;
	unsigned max;
 };
 
-static struct msr_range msr_range_array[] __cpuinitdata = {
+static const struct msr_range msr_range_array[] __cpuinitconst = {
	{ 0x00000000, 0x00000418},
	{ 0xc0000000, 0xc000040b},
	{ 0xc0010000, 0xc0010142},
@@ -856,14 +891,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
 
 static void __cpuinit print_cpu_msr(void)
 {
+	unsigned index_min, index_max;
	unsigned index;
	u64 val;
	int i;
-	unsigned index_min, index_max;
 
	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
		index_min = msr_range_array[i].min;
		index_max = msr_range_array[i].max;
+
		for (index = index_min; index < index_max; index++) {
			if (rdmsrl_amd_safe(index, &val))
				continue;
@@ -873,6 +909,7 @@ static void __cpuinit print_cpu_msr(void)
 }
 
 static int show_msr __cpuinitdata;
+
 static __init int setup_show_msr(char *arg)
 {
	int num;
@@ -894,12 +931,14 @@ __setup("noclflush", setup_noclflush);
 
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
-	char *vendor = NULL;
+	const char *vendor = NULL;
 
-	if (c->x86_vendor < X86_VENDOR_NUM)
+	if (c->x86_vendor < X86_VENDOR_NUM) {
		vendor = this_cpu->c_vendor;
-	else if (c->cpuid_level >= 0)
-		vendor = c->x86_vendor_id;
+	} else {
+		if (c->cpuid_level >= 0)
+			vendor = c->x86_vendor_id;
+	}
 
	if (vendor && !strstr(c->x86_model_id, vendor))
		printk(KERN_CONT "%s ", vendor);
@@ -926,10 +965,12 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static __init int setup_disablecpuid(char *arg)
 {
	int bit;
+
	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
		setup_clear_cpu_cap(bit);
	else
		return 0;
+
	return 1;
 }
 __setup("clearcpuid=", setup_disablecpuid);
@@ -939,6 +980,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
		     irq_stack_union) __aligned(PAGE_SIZE);
+
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
@@ -948,12 +990,21 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
 DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
+	  [DEBUG_STACK - 1] = DEBUG_STKSZ
+};
+
 static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
	__aligned(PAGE_SIZE);
 
-extern asmlinkage void ignore_sysret(void);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
@@ -983,7 +1034,7 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else	/* x86_64 */
+#else	/* CONFIG_X86_64 */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU(unsigned long, stack_canary);
@@ -995,9 +1046,26 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
	memset(regs, 0, sizeof(struct pt_regs));
	regs->fs = __KERNEL_PERCPU;
	regs->gs = __KERNEL_STACK_CANARY;
+
	return regs;
 }
-#endif	/* x86_64 */
+#endif	/* CONFIG_X86_64 */
+
+/*
+ * Clear all 6 debug registers:
+ */
+static void clear_all_debug_regs(void)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		/* Ignore db4, db5 */
+		if ((i == 4) || (i == 5))
+			continue;
+
+		set_debugreg(0, i);
+	}
+}
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -1007,15 +1075,20 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
  * A lot of state is already set up in PDA init for 64 bit
  */
 #ifdef CONFIG_X86_64
+
 void __cpuinit cpu_init(void)
 {
-	int cpu = stack_smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-	unsigned long v;
+	struct orig_ist *orig_ist;
	struct task_struct *me;
+	struct tss_struct *t;
+	unsigned long v;
+	int cpu;
	int i;
 
+	cpu = stack_smp_processor_id();
+	t = &per_cpu(init_tss, cpu);
+	orig_ist = &per_cpu(orig_ist, cpu);
+
 #ifdef CONFIG_NUMA
	if (cpu != 0 && percpu_read(node_number) == 0 &&
	    cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -1056,19 +1129,17 @@ void __cpuinit cpu_init(void)
	 * set up and load the per-CPU TSS
	 */
	if (!orig_ist->ist[0]) {
-		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
-		  [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
-		  [DEBUG_STACK - 1] = DEBUG_STKSZ
-		};
		char *estacks = per_cpu(exception_stacks, cpu);
+
		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			estacks += sizes[v];
+			estacks += exception_stack_sizes[v];
			orig_ist->ist[v] = t->x86_tss.ist[v] =
					(unsigned long)estacks;
		}
	}
 
	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
	/*
	 * <= is required because the CPU will access up to
	 * 8 bits beyond the end of the IO permission bitmap.
@@ -1097,17 +1168,7 @@ void __cpuinit cpu_init(void)
		arch_kgdb_ops.correct_hw_break();
	else
 #endif
-	{
-		/*
-		 * Clear all 6 debug registers:
-		 */
-		set_debugreg(0UL, 0);
-		set_debugreg(0UL, 1);
-		set_debugreg(0UL, 2);
-		set_debugreg(0UL, 3);
-		set_debugreg(0UL, 6);
-		set_debugreg(0UL, 7);
-	}
+		clear_all_debug_regs();
 
	fpu_init();
 
@@ -1128,7 +1189,8 @@ void __cpuinit cpu_init(void)
 
	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-		for (;;) local_irq_enable();
+		for (;;)
+			local_irq_enable();
	}
 
	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1157,13 +1219,7 @@ void __cpuinit cpu_init(void)
	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear all 6 debug registers: */
-	set_debugreg(0, 0);
-	set_debugreg(0, 1);
-	set_debugreg(0, 2);
-	set_debugreg(0, 3);
-	set_debugreg(0, 6);
-	set_debugreg(0, 7);
+	clear_all_debug_regs();
 
	/*
	 * Force FPU initialization:
@@ -1183,6 +1239,4 @@ void __cpuinit cpu_init(void)
 
	xsave_init();
 }
-
-
 #endif
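Note: the subtlest hunk above is the level check in filter_cpuid_features(). Extended CPUID levels (0x8000xxxx) are negative when viewed as s32, so they get compared unsigned against extended_cpuid_level, while basic levels compare signed so that a cpuid_level of -1 (CPUID absent) disables every dependent feature. A stand-alone restatement of the trick, compilable in userspace (function name and test values are mine, not from the patch):

	#include <stdio.h>
	#include <stdint.h>

	/* Returns 1 when the CPUID level a feature depends on is missing. */
	static int level_missing(int32_t level, int32_t cpuid_level,
				 uint32_t extended_cpuid_level)
	{
		return level < 0 ?
			(uint32_t)level > extended_cpuid_level :
			level > cpuid_level;
	}

	int main(void)
	{
		/* extended leaf 0x80000001 vs. a max extended level of 0x80000000: */
		printf("%d\n", level_missing((int32_t)0x80000001, 1, 0x80000000)); /* 1 */
		/* basic leaf 5 when CPUID is absent (cpuid_level == -1): */
		printf("%d\n", level_missing(5, -1, 0));                           /* 1 */
		/* basic leaf 5 with cpuid_level 10 -> feature stays: */
		printf("%d\n", level_missing(5, 10, 0));                           /* 0 */
		return 0;
	}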
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index de4094a39210..9469ecb5aeb8 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -5,15 +5,15 @@
 struct cpu_model_info {
	int vendor;
	int family;
-	char *model_names[16];
+	const char *model_names[16];
 };
 
 /* attempt to consolidate cpu attributes */
 struct cpu_dev {
-	char * c_vendor;
+	const char * c_vendor;
 
	/* some have two possibilities for cpuid string */
-	char * c_ident[2];
+	const char * c_ident[2];
 
	struct cpu_model_info c_models[4];
 
@@ -25,11 +25,12 @@ struct cpu_dev {
 };
 
 #define cpu_dev_register(cpu_devX) \
-	static struct cpu_dev *__cpu_dev_##cpu_devX __used \
+	static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
	__attribute__((__section__(".x86_cpu_dev.init"))) = \
	&cpu_devX;
 
-extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];
+extern const struct cpu_dev *const __x86_cpu_dev_start[],
+			    *const __x86_cpu_dev_end[];
 
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
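Note: this header is the registry side of the vendor-probing machinery. cpu_dev_register() emits a const pointer into the dedicated ".x86_cpu_dev.init" section, the linker script brackets that section with the __x86_cpu_dev_start/__x86_cpu_dev_end symbols, and the consumer (see the early_cpu_init() hunk in common.c above) just walks the array the linker assembled, so no hand-maintained vendor list is needed:

	const struct cpu_dev *const *cdev;

	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
		const struct cpu_dev *cpudev = *cdev;
		/* match cpudev->c_ident[] against the CPUID vendor string */
	}

The extra const in this patch is what lets each registered pointer itself live in a read-only section.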
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
index 9abbcbd933cc..21c0cf8ced18 100755
--- a/arch/x86/kernel/cpu/cpu_debug.c
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -11,6 +11,7 @@
 #include <linux/seq_file.h>
 #include <linux/debugfs.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/percpu.h>
@@ -40,41 +41,41 @@ static DEFINE_MUTEX(cpu_debug_lock);
 static struct dentry *cpu_debugfs_dir;
 
 static struct cpu_debug_base cpu_base[] = {
-	{ "mc", CPU_MC }, /* Machine Check */
-	{ "monitor", CPU_MONITOR }, /* Monitor */
-	{ "time", CPU_TIME }, /* Time */
-	{ "pmc", CPU_PMC }, /* Performance Monitor */
-	{ "platform", CPU_PLATFORM }, /* Platform */
-	{ "apic", CPU_APIC }, /* APIC */
-	{ "poweron", CPU_POWERON }, /* Power-on */
-	{ "control", CPU_CONTROL }, /* Control */
-	{ "features", CPU_FEATURES }, /* Features control */
-	{ "lastbranch", CPU_LBRANCH }, /* Last Branch */
-	{ "bios", CPU_BIOS }, /* BIOS */
-	{ "freq", CPU_FREQ }, /* Frequency */
-	{ "mtrr", CPU_MTRR }, /* MTRR */
-	{ "perf", CPU_PERF }, /* Performance */
-	{ "cache", CPU_CACHE }, /* Cache */
-	{ "sysenter", CPU_SYSENTER }, /* Sysenter */
-	{ "therm", CPU_THERM }, /* Thermal */
-	{ "misc", CPU_MISC }, /* Miscellaneous */
-	{ "debug", CPU_DEBUG }, /* Debug */
-	{ "pat", CPU_PAT }, /* PAT */
-	{ "vmx", CPU_VMX }, /* VMX */
-	{ "call", CPU_CALL }, /* System Call */
-	{ "base", CPU_BASE }, /* BASE Address */
-	{ "smm", CPU_SMM }, /* System mgmt mode */
-	{ "svm", CPU_SVM }, /*Secure Virtial Machine*/
-	{ "osvm", CPU_OSVM }, /* OS-Visible Workaround*/
-	{ "tss", CPU_TSS }, /* Task Stack Segment */
-	{ "cr", CPU_CR }, /* Control Registers */
-	{ "dt", CPU_DT }, /* Descriptor Table */
-	{ "registers", CPU_REG_ALL }, /* Select all Registers */
+	{ "mc", CPU_MC, 0 },
+	{ "monitor", CPU_MONITOR, 0 },
+	{ "time", CPU_TIME, 0 },
+	{ "pmc", CPU_PMC, 1 },
+	{ "platform", CPU_PLATFORM, 0 },
+	{ "apic", CPU_APIC, 0 },
+	{ "poweron", CPU_POWERON, 0 },
+	{ "control", CPU_CONTROL, 0 },
+	{ "features", CPU_FEATURES, 0 },
+	{ "lastbranch", CPU_LBRANCH, 0 },
+	{ "bios", CPU_BIOS, 0 },
+	{ "freq", CPU_FREQ, 0 },
+	{ "mtrr", CPU_MTRR, 0 },
+	{ "perf", CPU_PERF, 0 },
+	{ "cache", CPU_CACHE, 0 },
+	{ "sysenter", CPU_SYSENTER, 0 },
+	{ "therm", CPU_THERM, 0 },
+	{ "misc", CPU_MISC, 0 },
+	{ "debug", CPU_DEBUG, 0 },
+	{ "pat", CPU_PAT, 0 },
+	{ "vmx", CPU_VMX, 0 },
+	{ "call", CPU_CALL, 0 },
+	{ "base", CPU_BASE, 0 },
+	{ "smm", CPU_SMM, 0 },
+	{ "svm", CPU_SVM, 0 },
+	{ "osvm", CPU_OSVM, 0 },
+	{ "tss", CPU_TSS, 0 },
+	{ "cr", CPU_CR, 0 },
+	{ "dt", CPU_DT, 0 },
+	{ "registers", CPU_REG_ALL, 0 },
 };
 
 static struct cpu_file_base cpu_file[] = {
-	{ "index", CPU_REG_ALL }, /* index */
-	{ "value", CPU_REG_ALL }, /* value */
+	{ "index", CPU_REG_ALL, 0 },
+	{ "value", CPU_REG_ALL, 1 },
 };
 
 /* Intel Registers Range */
@@ -608,9 +609,62 @@ static int cpu_seq_open(struct inode *inode, struct file *file)
	return err;
 }
 
+static int write_msr(struct cpu_private *priv, u64 val)
+{
+	u32 low, high;
+
+	high = (val >> 32) & 0xffffffff;
+	low = val & 0xffffffff;
+
+	if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
+		return 0;
+
+	return -EPERM;
+}
+
+static int write_cpu_register(struct cpu_private *priv, const char *buf)
+{
+	int ret = -EPERM;
+	u64 val;
+
+	ret = strict_strtoull(buf, 0, &val);
+	if (ret < 0)
+		return ret;
+
+	/* Supporting only MSRs */
+	if (priv->type < CPU_TSS_BIT)
+		return write_msr(priv, val);
+
+	return ret;
+}
+
+static ssize_t cpu_write(struct file *file, const char __user *ubuf,
+			 size_t count, loff_t *off)
+{
+	struct seq_file *seq = file->private_data;
+	struct cpu_private *priv = seq->private;
+	char buf[19];
+
+	if ((priv == NULL) || (count >= sizeof(buf)))
+		return -EINVAL;
+
+	if (copy_from_user(&buf, ubuf, count))
+		return -EFAULT;
+
+	buf[count] = 0;
+
+	if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
+		if (!write_cpu_register(priv, buf))
+			return count;
+
+	return -EACCES;
+}
+
 static const struct file_operations cpu_fops = {
+	.owner = THIS_MODULE,
	.open = cpu_seq_open,
	.read = seq_read,
+	.write = cpu_write,
	.llseek = seq_lseek,
	.release = seq_release,
 };
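Note on the write path added above: buf[19] leaves room for "0x" plus sixteen hex digits plus the terminating NUL, and the count >= sizeof(buf) check rejects anything longer before buf[count] = 0 runs, so the store stays in bounds. write_msr() then splits the parsed 64-bit value into the low/high 32-bit halves that wrmsr takes. The split and a reassembly check, shown stand-alone (a userspace sketch, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t val = 0x123456789abcdef0ULL;
		uint32_t high = (val >> 32) & 0xffffffff;
		uint32_t low  = val & 0xffffffff;

		printf("high=%#x low=%#x\n", high, low); /* 0x12345678 0x9abcdef0 */
		printf("ok=%d\n", ((uint64_t)high << 32 | low) == val); /* 1 */
		return 0;
	}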
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index ffd0f5ed071a..593171e967ef 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -61,23 +61,23 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
  */
 static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
 
-static char Cx86_model[][9] __cpuinitdata = {
+static const char __cpuinitconst Cx86_model[][9] = {
	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
	"M II ", "Unknown"
 };
-static char Cx486_name[][5] __cpuinitdata = {
+static const char __cpuinitconst Cx486_name[][5] = {
	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
	"SRx2", "DRx2"
 };
-static char Cx486S_name[][4] __cpuinitdata = {
+static const char __cpuinitconst Cx486S_name[][4] = {
	"S", "S2", "Se", "S2e"
 };
-static char Cx486D_name[][4] __cpuinitdata = {
+static const char __cpuinitconst Cx486D_name[][4] = {
	"DX", "DX2", "?", "?", "?", "DX4"
 };
 static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
-static char cyrix_model_mult1[] __cpuinitdata = "12??43";
-static char cyrix_model_mult2[] __cpuinitdata = "12233445";
+static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
+static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
 
 /*
  * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -435,7 +435,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
	}
 }
 
-static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
	.c_vendor = "Cyrix",
	.c_ident = { "CyrixInstead" },
	.c_early_init = early_init_cyrix,
@@ -446,7 +446,7 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
 
 cpu_dev_register(cyrix_cpu_dev);
 
-static struct cpu_dev nsc_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
	.c_vendor = "NSC",
	.c_ident = { "Geode by NSC" },
	.c_init = init_nsc,
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 191117f1ad51..b09d4eb52bb9 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -54,6 +54,11 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
		c->x86_cache_alignment = 128;
 #endif
 
+	/* CPUID workaround for 0F33/0F34 CPU */
+	if (c->x86 == 0xF && c->x86_model == 0x3
+	    && (c->x86_mask == 0x3 || c->x86_mask == 0x4))
+		c->x86_phys_bits = 36;
+
	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
@@ -410,7 +415,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
 }
 #endif
 
-static struct cpu_dev intel_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
	.c_vendor = "Intel",
	.c_ident = { "GenuineIntel" },
 #ifdef CONFIG_X86_32
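Note: the "0F33/0F34" in the new workaround names a CPUID signature, and the fields it tests (x86, x86_model, x86_mask) are the family/model/stepping decoded from CPUID leaf 1 by cpu_detect() in common.c above; for those steppings the patch pins x86_phys_bits at 36 rather than trusting what the CPU reports. The decode, checked stand-alone for a signature of 0x00000F34:

	#include <stdio.h>

	int main(void)
	{
		unsigned int tfms = 0x00000f34;
		unsigned int family   = (tfms >> 8) & 0xf;
		unsigned int model    = (tfms >> 4) & 0xf;
		unsigned int stepping = tfms & 0xf;

		if (family == 0xf)
			family += (tfms >> 20) & 0xff;	/* extended family */
		if (family >= 0x6)
			model += ((tfms >> 16) & 0xf) << 4;	/* extended model */

		printf("family=%#x model=%#x stepping=%#x\n",
		       family, model, stepping);	/* family=0xf model=0x3 stepping=0x4 */
		return 0;
	}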
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 7293508d8f5c..c471eb1a389c 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -32,7 +32,7 @@ struct _cache_table
 };
 
 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
-static struct _cache_table cache_table[] __cpuinitdata =
+static const struct _cache_table __cpuinitconst cache_table[] =
 {
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
@@ -206,15 +206,15 @@ union l3_cache {
	unsigned val;
 };
 
-static unsigned short assocs[] __cpuinitdata = {
+static const unsigned short __cpuinitconst assocs[] = {
	[1] = 1, [2] = 2, [4] = 4, [6] = 8,
	[8] = 16, [0xa] = 32, [0xb] = 48,
	[0xc] = 64,
	[0xf] = 0xffff // ??
 };
 
-static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 };
-static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 };
+static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
+static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
 
 static void __cpuinit
 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
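Note: assocs[] translates the 4-bit associativity codes that the AMD extended CPUID cache leaves report into actual way counts (the source's own "??" comment suggests the 0xf -> 0xffff entry is a catch-all sentinel). A trivial stand-alone check of the designated-initializer mapping:

	#include <stdio.h>

	static const unsigned short assocs[] = {
		[1] = 1, [2] = 2, [4] = 4, [6] = 8,
		[8] = 16, [0xa] = 32, [0xb] = 48,
		[0xc] = 64,
		[0xf] = 0xffff	/* catch-all sentinel, per the source */
	};

	int main(void)
	{
		printf("%u\n", assocs[0xa]);	/* code 0xa -> 32 ways */
		return 0;
	}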
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 52b3fefbd5af..bb62b3e5caad 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
 #endif
 }
 
-static struct cpu_dev transmeta_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
	.c_vendor = "Transmeta",
	.c_ident = { "GenuineTMx86", "TransmetaCPU" },
	.c_early_init = early_init_transmeta,
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index e777f79e0960..fd2c37bf7acb 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -8,7 +8,7 @@
  * so no special init takes place.
  */
 
-static struct cpu_dev umc_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst umc_cpu_dev = {
	.c_vendor = "UMC",
	.c_ident = { "UMC UMC UMC" },
	.c_models = {