Diffstat (limited to 'arch/x86/kernel/cpu')
-rw-r--r--  arch/x86/kernel/cpu/Makefile                |   2
-rw-r--r--  arch/x86/kernel/cpu/addon_cpuid_features.c  |   2
-rw-r--r--  arch/x86/kernel/cpu/amd.c                   |   2
-rw-r--r--  arch/x86/kernel/cpu/centaur.c               |   2
-rw-r--r--  arch/x86/kernel/cpu/centaur_64.c            |   2
-rw-r--r--  arch/x86/kernel/cpu/common.c                | 384
-rw-r--r--  arch/x86/kernel/cpu/cpu.h                   |  11
-rwxr-xr-x  arch/x86/kernel/cpu/cpu_debug.c             | 839
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c                 |  16
-rw-r--r--  arch/x86/kernel/cpu/intel.c                 |   2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c       |   8
-rw-r--r--  arch/x86/kernel/cpu/transmeta.c             |   2
-rw-r--r--  arch/x86/kernel/cpu/umc.c                   |   2
13 files changed, 1079 insertions(+), 195 deletions(-)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 82db7f45e2d..d4356f8b752 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -14,6 +14,8 @@ obj-y += vmware.o hypervisor.o
 obj-$(CONFIG_X86_32)		+= bugs.o cmpxchg.o
 obj-$(CONFIG_X86_64)		+= bugs_64.o
 
+obj-$(CONFIG_X86_CPU_DEBUG)	+= cpu_debug.o
+
 obj-$(CONFIG_CPU_SUP_INTEL)		+= intel.o
 obj-$(CONFIG_CPU_SUP_AMD)		+= amd.o
 obj-$(CONFIG_CPU_SUP_CYRIX_32)		+= cyrix.o
diff --git a/arch/x86/kernel/cpu/addon_cpuid_features.c b/arch/x86/kernel/cpu/addon_cpuid_features.c
index 6882a735d9c..8220ae69849 100644
--- a/arch/x86/kernel/cpu/addon_cpuid_features.c
+++ b/arch/x86/kernel/cpu/addon_cpuid_features.c
@@ -29,7 +29,7 @@ void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 	u32 regs[4];
 	const struct cpuid_bit *cb;
 
-	static const struct cpuid_bit cpuid_bits[] = {
+	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
 		{ X86_FEATURE_IDA, CR_EAX, 1, 0x00000006 },
 		{ 0, 0, 0, 0 }
 	};
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index f47df59016c..7e4a459daa6 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -502,7 +502,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int
 }
 #endif
 
-static struct cpu_dev amd_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 89bfdd9cacc..983e0830f0d 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -468,7 +468,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 	return size;
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
diff --git a/arch/x86/kernel/cpu/centaur_64.c b/arch/x86/kernel/cpu/centaur_64.c
index a1625f5a1e7..51b09c48c9c 100644
--- a/arch/x86/kernel/cpu/centaur_64.c
+++ b/arch/x86/kernel/cpu/centaur_64.c
@@ -25,7 +25,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 	set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
 }
 
-static struct cpu_dev centaur_cpu_dev __cpuinitdata = {
+static const struct cpu_dev centaur_cpu_dev __cpuinitconst = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a95e9480bb9..e2962cc1e27 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,52 +1,52 @@
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/string.h>
 #include <linux/bootmem.h>
+#include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/kgdb.h>
-#include <linux/topology.h>
+#include <linux/percpu.h>
+#include <linux/string.h>
 #include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/kgdb.h>
 #include <linux/smp.h>
-#include <linux/percpu.h>
-#include <asm/i387.h>
-#include <asm/msr.h>
-#include <asm/io.h>
-#include <asm/linkage.h>
+#include <linux/io.h>
+
+#include <asm/stackprotector.h>
 #include <asm/mmu_context.h>
+#include <asm/hypervisor.h>
+#include <asm/processor.h>
+#include <asm/sections.h>
+#include <asm/topology.h>
+#include <asm/cpumask.h>
+#include <asm/pgtable.h>
+#include <asm/atomic.h>
+#include <asm/proto.h>
+#include <asm/setup.h>
+#include <asm/apic.h>
+#include <asm/desc.h>
+#include <asm/i387.h>
 #include <asm/mtrr.h>
+#include <asm/numa.h>
+#include <asm/asm.h>
+#include <asm/cpu.h>
 #include <asm/mce.h>
+#include <asm/msr.h>
 #include <asm/pat.h>
-#include <asm/asm.h>
-#include <asm/numa.h>
 #include <asm/smp.h>
-#include <asm/cpu.h>
-#include <asm/cpumask.h>
-#include <asm/apic.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
 #include <asm/uv/uv.h>
 #endif
 
-#include <asm/pgtable.h>
-#include <asm/processor.h>
-#include <asm/desc.h>
-#include <asm/atomic.h>
-#include <asm/proto.h>
-#include <asm/sections.h>
-#include <asm/setup.h>
-#include <asm/hypervisor.h>
-#include <asm/stackprotector.h>
-
 #include "cpu.h"
 
 #ifdef CONFIG_X86_64
 
 /* all of these masks are initialized in setup_cpu_local_masks() */
-cpumask_var_t cpu_callin_mask;
-cpumask_var_t cpu_callout_mask;
 cpumask_var_t cpu_initialized_mask;
+cpumask_var_t cpu_callout_mask;
+cpumask_var_t cpu_callin_mask;
 
 /* representing cpus for which sibling maps can be computed */
 cpumask_var_t cpu_sibling_setup_mask;
@@ -62,15 +62,15 @@ void __init setup_cpu_local_masks(void)
 
 #else /* CONFIG_X86_32 */
 
-cpumask_t cpu_callin_map;
+cpumask_t cpu_sibling_setup_map;
 cpumask_t cpu_callout_map;
 cpumask_t cpu_initialized;
-cpumask_t cpu_sibling_setup_map;
+cpumask_t cpu_callin_map;
 
 #endif /* CONFIG_X86_32 */
 
 
-static struct cpu_dev *this_cpu __cpuinitdata;
+static const struct cpu_dev *this_cpu __cpuinitdata;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -79,48 +79,48 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
  * IRET will check the segment types  kkeil 2000/10/28
  * Also sysret mandates a special GDT layout
  *
- * The TLS descriptors are currently at a different place compared to i386.
+ * TLS descriptors are currently at a different place compared to i386.
  * Hopefully nobody expects them at a fixed place (Wine?)
  */
 	[GDT_ENTRY_KERNEL32_CS] = { { { 0x0000ffff, 0x00cf9b00 } } },
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00af9b00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9300 } } },
 	[GDT_ENTRY_DEFAULT_USER32_CS] = { { { 0x0000ffff, 0x00cffb00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff300 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00affb00 } } },
 #else
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
 	[GDT_ENTRY_DEFAULT_USER_CS] = { { { 0x0000ffff, 0x00cffa00 } } },
 	[GDT_ENTRY_DEFAULT_USER_DS] = { { { 0x0000ffff, 0x00cff200 } } },
 	/*
 	 * Segments used for calling PnP BIOS have byte granularity.
 	 * They code segments and data segments have fixed 64k limits,
 	 * the transfer segment sizes are set at run time.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_PNPBIOS_CS32] = { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_PNPBIOS_CS16] = { { { 0x0000ffff, 0x00009a00 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_DS] = { { { 0x0000ffff, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS1] = { { { 0x00000000, 0x00009200 } } },
 	/* 16-bit data */
 	[GDT_ENTRY_PNPBIOS_TS2] = { { { 0x00000000, 0x00009200 } } },
 	/*
 	 * The APM segments have byte granularity and their bases
 	 * are set at run time.  All have 64k limits.
 	 */
 	/* 32-bit code */
 	[GDT_ENTRY_APMBIOS_BASE] = { { { 0x0000ffff, 0x00409a00 } } },
 	/* 16-bit code */
 	[GDT_ENTRY_APMBIOS_BASE+1] = { { { 0x0000ffff, 0x00009a00 } } },
 	/* data */
 	[GDT_ENTRY_APMBIOS_BASE+2] = { { { 0x0000ffff, 0x00409200 } } },
 
 	[GDT_ENTRY_ESPFIX_SS] = { { { 0x00000000, 0x00c09200 } } },
 	[GDT_ENTRY_PERCPU] = { { { 0x0000ffff, 0x00cf9200 } } },
 	GDT_STACK_CANARY_INIT
 #endif
 } };
@@ -164,16 +164,17 @@ static inline int flag_is_changeable_p(u32 flag)
 	 * the CPUID. Add "volatile" to not allow gcc to
 	 * optimize the subsequent calls to this function.
 	 */
-	asm volatile ("pushfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "movl %0,%1\n\t"
-		      "xorl %2,%0\n\t"
-		      "pushl %0\n\t"
-		      "popfl\n\t"
-		      "pushfl\n\t"
-		      "popl %0\n\t"
-		      "popfl\n\t"
+	asm volatile ("pushfl		\n\t"
+		      "pushfl		\n\t"
+		      "popl %0		\n\t"
+		      "movl %0, %1	\n\t"
+		      "xorl %2, %0	\n\t"
+		      "pushl %0		\n\t"
+		      "popfl		\n\t"
+		      "pushfl		\n\t"
+		      "popl %0		\n\t"
+		      "popfl		\n\t"
+
 		      : "=&r" (f1), "=&r" (f2)
 		      : "ir" (flag));
 
@@ -188,18 +189,22 @@ static int __cpuinit have_cpuid_p(void)
 
 static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
-
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
+	unsigned long lo, hi;
+
+	if (!cpu_has(c, X86_FEATURE_PN) || !disable_x86_serial_nr)
+		return;
+
+	/* Disable processor serial number: */
+
+	rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+	lo |= 0x200000;
+	wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+
+	printk(KERN_NOTICE "CPU serial number disabled.\n");
+	clear_cpu_cap(c, X86_FEATURE_PN);
+
+	/* Disabling the serial number may affect the cpuid level */
+	c->cpuid_level = cpuid_eax(0);
 }
 
 static int __init x86_serial_nr_setup(char *s)
@@ -232,6 +237,7 @@ struct cpuid_dependent_feature {
 	u32 feature;
 	u32 level;
 };
+
 static const struct cpuid_dependent_feature __cpuinitconst
 cpuid_dependent_features[] = {
 	{ X86_FEATURE_MWAIT,		0x00000005 },
@@ -243,7 +249,11 @@ cpuid_dependent_features[] = {
 static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
 	const struct cpuid_dependent_feature *df;
+
 	for (df = cpuid_dependent_features; df->feature; df++) {
+
+		if (!cpu_has(c, df->feature))
+			continue;
 		/*
 		 * Note: cpuid_level is set to -1 if unavailable, but
 		 * extended_extended_level is set to 0 if unavailable
@@ -251,32 +261,32 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 		 * when signed; hence the weird messing around with
 		 * signs here...
 		 */
-		if (cpu_has(c, df->feature) &&
-		    ((s32)df->level < 0 ?
+		if (!((s32)df->level < 0 ?
 		     (u32)df->level > (u32)c->extended_cpuid_level :
-		     (s32)df->level > (s32)c->cpuid_level)) {
-			clear_cpu_cap(c, df->feature);
-			if (warn)
-				printk(KERN_WARNING
-				       "CPU: CPU feature %s disabled "
-				       "due to lack of CPUID level 0x%x\n",
-				       x86_cap_flags[df->feature],
-				       df->level);
-		}
+		     (s32)df->level > (s32)c->cpuid_level))
+			continue;
+
+		clear_cpu_cap(c, df->feature);
+		if (!warn)
+			continue;
+
+		printk(KERN_WARNING
+		       "CPU: CPU feature %s disabled, no CPUID level 0x%x\n",
+		       x86_cap_flags[df->feature], df->level);
 	}
 }
 
 /*
  * Naming convention should be: <Name> [(<Codename>)]
  * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
+ * in particular, if CPUID levels 0x80000002..4 are supported, this
+ * isn't used
  */
 
 /* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 {
-	struct cpu_model_info *info;
+	const struct cpu_model_info *info;
 
 	if (c->x86_model >= 16)
 		return NULL;	/* Range check */
@@ -307,8 +317,10 @@ void load_percpu_segment(int cpu)
 	load_stack_canary_segment();
 }
 
-/* Current gdt points %fs at the "master" per-cpu area: after this,
- * it's on the real one. */
+/*
+ * Current gdt points %fs at the "master" per-cpu area: after this,
+ * it's on the real one.
+ */
 void switch_to_new_gdt(int cpu)
 {
 	struct desc_ptr gdt_descr;
@@ -321,7 +333,7 @@ void switch_to_new_gdt(int cpu)
 	load_percpu_segment(cpu);
 }
 
-static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
 {
@@ -340,7 +352,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }
 
-static struct cpu_dev __cpuinitdata default_cpu = {
+static const struct cpu_dev __cpuinitconst default_cpu = {
 	.c_init		= default_init,
 	.c_vendor	= "Unknown",
 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
@@ -354,22 +366,24 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	if (c->extended_cpuid_level < 0x80000004)
 		return;
 
-	v = (unsigned int *) c->x86_model_id;
+	v = (unsigned int *)c->x86_model_id;
 	cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
 	cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
 	cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
 	c->x86_model_id[48] = 0;
 
-	/* Intel chips right-justify this string for some dumb reason;
-	   undo that brain damage */
+	/*
+	 * Intel chips right-justify this string for some dumb reason;
+	 * undo that brain damage:
+	 */
 	p = q = &c->x86_model_id[0];
 	while (*p == ' ')
 		p++;
 	if (p != q) {
 		while (*p)
 			*q++ = *p++;
 		while (q <= &c->x86_model_id[48])
 			*q++ = '\0';	/* Zero-pad the rest */
 	}
 }
 
@@ -438,27 +452,30 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 	if (smp_num_siblings == 1) {
 		printk(KERN_INFO  "CPU: Hyper-Threading is disabled\n");
-	} else if (smp_num_siblings > 1) {
+		goto out;
+	}
 
-		if (smp_num_siblings > nr_cpu_ids) {
-			printk(KERN_WARNING "CPU: Unsupported number of siblings %d",
-			       smp_num_siblings);
-			smp_num_siblings = 1;
-			return;
-		}
+	if (smp_num_siblings <= 1)
+		goto out;
+
+	if (smp_num_siblings > nr_cpu_ids) {
+		pr_warning("CPU: Unsupported number of siblings %d",
+			   smp_num_siblings);
+		smp_num_siblings = 1;
+		return;
+	}
 
 	index_msb = get_count_order(smp_num_siblings);
 	c->phys_proc_id = apic->phys_pkg_id(c->initial_apicid, index_msb);
 
 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
 	index_msb = get_count_order(smp_num_siblings);
 
 	core_bits = get_count_order(c->x86_max_cores);
 
 	c->cpu_core_id = apic->phys_pkg_id(c->initial_apicid, index_msb) &
 				       ((1 << core_bits) - 1);
-	}
 
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
@@ -473,8 +490,8 @@ out:
 static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
-	int i;
 	static int printed;
+	int i;
 
 	for (i = 0; i < X86_VENDOR_NUM; i++) {
 		if (!cpu_devs[i])
@@ -483,6 +500,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 		if (!strcmp(v, cpu_devs[i]->c_ident[0]) ||
 		    (cpu_devs[i]->c_ident[1] &&
 		     !strcmp(v, cpu_devs[i]->c_ident[1]))) {
+
 			this_cpu = cpu_devs[i];
 			c->x86_vendor = this_cpu->c_x86_vendor;
 			return;
@@ -491,7 +509,9 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 
 	if (!printed) {
 		printed++;
-		printk(KERN_ERR "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+		printk(KERN_ERR
+		       "CPU: vendor_id '%s' unknown, using generic init.\n", v);
+
 		printk(KERN_ERR "CPU: Your system may be unstable.\n");
 	}
 
@@ -511,14 +531,17 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 junk, tfms, cap0, misc;
+
 		cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
 		c->x86 = (tfms >> 8) & 0xf;
 		c->x86_model = (tfms >> 4) & 0xf;
 		c->x86_mask = tfms & 0xf;
+
 		if (c->x86 == 0xf)
 			c->x86 += (tfms >> 20) & 0xff;
 		if (c->x86 >= 0x6)
 			c->x86_model += ((tfms >> 16) & 0xf) << 4;
+
 		if (cap0 & (1<<19)) {
 			c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
 			c->x86_cache_alignment = c->x86_clflush_size;
@@ -534,6 +557,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* Intel-defined flags: level 0x00000001 */
 	if (c->cpuid_level >= 0x00000001) {
 		u32 capability, excap;
+
 		cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
 		c->x86_capability[0] = capability;
 		c->x86_capability[4] = excap;
@@ -542,6 +566,7 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	/* AMD-defined flags: level 0x80000001 */
 	xlvl = cpuid_eax(0x80000000);
 	c->extended_cpuid_level = xlvl;
+
 	if ((xlvl & 0xffff0000) == 0x80000000) {
 		if (xlvl >= 0x80000001) {
 			c->x86_capability[1] = cpuid_edx(0x80000001);
@@ -640,12 +665,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 
 void __init early_cpu_init(void)
 {
-	struct cpu_dev **cdev;
+	const struct cpu_dev *const *cdev;
 	int count = 0;
 
-	printk("KERNEL supported cpus:\n");
+	printk(KERN_INFO "KERNEL supported cpus:\n");
 	for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) {
-		struct cpu_dev *cpudev = *cdev;
+		const struct cpu_dev *cpudev = *cdev;
 		unsigned int j;
 
 		if (count >= X86_VENDOR_NUM)
@@ -656,7 +681,7 @@ void __init early_cpu_init(void)
 		for (j = 0; j < 2; j++) {
 			if (!cpudev->c_ident[j])
 				continue;
-			printk("  %s %s\n", cpudev->c_vendor,
+			printk(KERN_INFO "  %s %s\n", cpudev->c_vendor,
 				cpudev->c_ident[j]);
 		}
 	}
@@ -769,8 +794,8 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	squash_the_stupid_serial_number(c);
 
 	/*
-	 * The vendor-specific functions might have changed features. Now
-	 * we do "generic changes."
+	 * The vendor-specific functions might have changed features.
+	 * Now we do "generic changes."
 	 */
 
 	/* Filter out anything that depends on CPUID levels we don't have */
@@ -778,7 +803,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
 	/* If the model name is still unset, do table lookup. */
 	if (!c->x86_model_id[0]) {
-		char *p;
+		const char *p;
 		p = table_lookup_model(c);
 		if (p)
 			strcpy(c->x86_model_id, p);
@@ -853,11 +878,11 @@ void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
 }
 
 struct msr_range {
 	unsigned	min;
 	unsigned	max;
 };
 
-static struct msr_range msr_range_array[] __cpuinitdata = {
+static const struct msr_range msr_range_array[] __cpuinitconst = {
 	{ 0x00000000, 0x00000418},
 	{ 0xc0000000, 0xc000040b},
 	{ 0xc0010000, 0xc0010142},
@@ -866,14 +891,15 @@ static struct msr_range msr_range_array[] __cpuinitdata = {
 
 static void __cpuinit print_cpu_msr(void)
 {
+	unsigned index_min, index_max;
 	unsigned index;
 	u64 val;
 	int i;
-	unsigned index_min, index_max;
 
 	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
 		index_min = msr_range_array[i].min;
 		index_max = msr_range_array[i].max;
+
 		for (index = index_min; index < index_max; index++) {
 			if (rdmsrl_amd_safe(index, &val))
 				continue;
@@ -883,6 +909,7 @@ static void __cpuinit print_cpu_msr(void)
 }
 
 static int show_msr __cpuinitdata;
+
 static __init int setup_show_msr(char *arg)
 {
 	int num;
@@ -904,12 +931,14 @@ __setup("noclflush", setup_noclflush);
 
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
-	char *vendor = NULL;
+	const char *vendor = NULL;
 
-	if (c->x86_vendor < X86_VENDOR_NUM)
+	if (c->x86_vendor < X86_VENDOR_NUM) {
 		vendor = this_cpu->c_vendor;
-	else if (c->cpuid_level >= 0)
-		vendor = c->x86_vendor_id;
+	} else {
+		if (c->cpuid_level >= 0)
+			vendor = c->x86_vendor_id;
+	}
 
 	if (vendor && !strstr(c->x86_model_id, vendor))
 		printk(KERN_CONT "%s ", vendor);
@@ -936,10 +965,12 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 static __init int setup_disablecpuid(char *arg)
 {
 	int bit;
+
 	if (get_option(&arg, &bit) && bit < NCAPINTS*32)
 		setup_clear_cpu_cap(bit);
 	else
 		return 0;
+
 	return 1;
 }
 __setup("clearcpuid=", setup_disablecpuid);
@@ -949,6 +980,7 @@ struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
 
 DEFINE_PER_CPU_FIRST(union irq_stack_union,
 		     irq_stack_union) __aligned(PAGE_SIZE);
+
 DEFINE_PER_CPU(char *, irq_stack_ptr) =
 	init_per_cpu_var(irq_stack_union.irq_stack) + IRQ_STACK_SIZE - 64;
 
@@ -958,12 +990,21 @@ EXPORT_PER_CPU_SYMBOL(kernel_stack);
 
 DEFINE_PER_CPU(unsigned int, irq_count) = -1;
 
+/*
+ * Special IST stacks which the CPU switches to when it calls
+ * an IST-marked descriptor entry. Up to 7 stacks (hardware
+ * limit), all of them are 4K, except the debug stack which
+ * is 8K.
+ */
+static const unsigned int exception_stack_sizes[N_EXCEPTION_STACKS] = {
+	  [0 ... N_EXCEPTION_STACKS - 1]	= EXCEPTION_STKSZ,
+	  [DEBUG_STACK - 1]			= DEBUG_STKSZ
+};
+
 static DEFINE_PER_CPU_PAGE_ALIGNED(char, exception_stacks
 	[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ])
 	__aligned(PAGE_SIZE);
 
-extern asmlinkage void ignore_sysret(void);
-
 /* May not be marked __init: used by software suspend */
 void syscall_init(void)
 {
@@ -993,7 +1034,7 @@ unsigned long kernel_eflags;
  */
 DEFINE_PER_CPU(struct orig_ist, orig_ist);
 
-#else	/* x86_64 */
+#else	/* CONFIG_X86_64 */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 DEFINE_PER_CPU(unsigned long, stack_canary);
@@ -1005,9 +1046,26 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
 	memset(regs, 0, sizeof(struct pt_regs));
 	regs->fs = __KERNEL_PERCPU;
 	regs->gs = __KERNEL_STACK_CANARY;
+
 	return regs;
 }
-#endif	/* x86_64 */
+#endif	/* CONFIG_X86_64 */
+
+/*
+ * Clear all 6 debug registers:
+ */
+static void clear_all_debug_regs(void)
+{
+	int i;
+
+	for (i = 0; i < 8; i++) {
+		/* Ignore db4, db5 */
+		if ((i == 4) || (i == 5))
+			continue;
+
+		set_debugreg(0, i);
+	}
+}
 
 /*
  * cpu_init() initializes state that is per-CPU. Some data is already
@@ -1017,15 +1075,20 @@ struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
  * A lot of state is already set up in PDA init for 64 bit
  */
 #ifdef CONFIG_X86_64
+
 void __cpuinit cpu_init(void)
 {
-	int cpu = stack_smp_processor_id();
-	struct tss_struct *t = &per_cpu(init_tss, cpu);
-	struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
-	unsigned long v;
+	struct orig_ist *orig_ist;
 	struct task_struct *me;
+	struct tss_struct *t;
+	unsigned long v;
+	int cpu;
 	int i;
 
+	cpu = stack_smp_processor_id();
+	t = &per_cpu(init_tss, cpu);
+	orig_ist = &per_cpu(orig_ist, cpu);
+
 #ifdef CONFIG_NUMA
 	if (cpu != 0 && percpu_read(node_number) == 0 &&
 	    cpu_to_node(cpu) != NUMA_NO_NODE)
@@ -1066,19 +1129,17 @@ void __cpuinit cpu_init(void)
 	 * set up and load the per-CPU TSS
 	 */
 	if (!orig_ist->ist[0]) {
-		static const unsigned int sizes[N_EXCEPTION_STACKS] = {
-			[0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STKSZ,
-			[DEBUG_STACK - 1] = DEBUG_STKSZ
-		};
 		char *estacks = per_cpu(exception_stacks, cpu);
+
 		for (v = 0; v < N_EXCEPTION_STACKS; v++) {
-			estacks += sizes[v];
+			estacks += exception_stack_sizes[v];
 			orig_ist->ist[v] = t->x86_tss.ist[v] =
 					(unsigned long)estacks;
 		}
 	}
 
 	t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
+
 	/*
 	 * <= is required because the CPU will access up to
 	 * 8 bits beyond the end of the IO permission bitmap.
@@ -1088,8 +1149,7 @@ void __cpuinit cpu_init(void)
 
 	atomic_inc(&init_mm.mm_count);
 	me->active_mm = &init_mm;
-	if (me->mm)
-		BUG();
+	BUG_ON(me->mm);
 	enter_lazy_tlb(&init_mm, me);
 
 	load_sp0(t, &current->thread);
@@ -1108,17 +1168,7 @@ void __cpuinit cpu_init(void)
 		arch_kgdb_ops.correct_hw_break();
 	else
 #endif
-	{
-		/*
-		 * Clear all 6 debug registers:
-		 */
-		set_debugreg(0UL, 0);
-		set_debugreg(0UL, 1);
-		set_debugreg(0UL, 2);
-		set_debugreg(0UL, 3);
-		set_debugreg(0UL, 6);
-		set_debugreg(0UL, 7);
-	}
+		clear_all_debug_regs();
 
 	fpu_init();
 
@@ -1139,7 +1189,8 @@ void __cpuinit cpu_init(void)
 
 	if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
-		for (;;) local_irq_enable();
+		for (;;)
+			local_irq_enable();
 	}
 
 	printk(KERN_INFO "Initializing CPU#%d\n", cpu);
@@ -1155,8 +1206,7 @@ void __cpuinit cpu_init(void)
 	 */
 	atomic_inc(&init_mm.mm_count);
 	curr->active_mm = &init_mm;
-	if (curr->mm)
-		BUG();
+	BUG_ON(curr->mm);
 	enter_lazy_tlb(&init_mm, curr);
 
 	load_sp0(t, thread);
@@ -1169,13 +1219,7 @@ void __cpuinit cpu_init(void)
 	__set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
 #endif
 
-	/* Clear all 6 debug registers: */
-	set_debugreg(0, 0);
-	set_debugreg(0, 1);
-	set_debugreg(0, 2);
-	set_debugreg(0, 3);
-	set_debugreg(0, 6);
-	set_debugreg(0, 7);
+	clear_all_debug_regs();
 
 	/*
 	 * Force FPU initialization:
@@ -1195,6 +1239,4 @@ void __cpuinit cpu_init(void)
 
 	xsave_init();
 }
-
-
 #endif
diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h
index de4094a3921..9469ecb5aeb 100644
--- a/arch/x86/kernel/cpu/cpu.h
+++ b/arch/x86/kernel/cpu/cpu.h
@@ -5,15 +5,15 @@
 struct cpu_model_info {
 	int vendor;
 	int family;
-	char *model_names[16];
+	const char *model_names[16];
 };
 
 /* attempt to consolidate cpu attributes */
 struct cpu_dev {
-	char	* c_vendor;
+	const char	* c_vendor;
 
 	/* some have two possibilities for cpuid string */
-	char	* c_ident[2];
+	const char	* c_ident[2];
 
 	struct cpu_model_info	c_models[4];
 
@@ -25,11 +25,12 @@ struct cpu_dev {
 };
 
 #define cpu_dev_register(cpu_devX) \
-	static struct cpu_dev *__cpu_dev_##cpu_devX __used \
+	static const struct cpu_dev *const __cpu_dev_##cpu_devX __used \
 	__attribute__((__section__(".x86_cpu_dev.init"))) = \
 	&cpu_devX;
 
-extern struct cpu_dev *__x86_cpu_dev_start[], *__x86_cpu_dev_end[];
+extern const struct cpu_dev *const __x86_cpu_dev_start[],
+			    *const __x86_cpu_dev_end[];
 
 extern void display_cacheinfo(struct cpuinfo_x86 *c);
 
diff --git a/arch/x86/kernel/cpu/cpu_debug.c b/arch/x86/kernel/cpu/cpu_debug.c
new file mode 100755
index 00000000000..21c0cf8ced1
--- /dev/null
+++ b/arch/x86/kernel/cpu/cpu_debug.c
@@ -0,0 +1,839 @@
1/*
2 * CPU x86 architecture debug code
3 *
4 * Copyright(C) 2009 Jaswinder Singh Rajput
5 *
6 * For licencing details see kernel-base/COPYING
7 */
8
9#include <linux/interrupt.h>
10#include <linux/compiler.h>
11#include <linux/seq_file.h>
12#include <linux/debugfs.h>
13#include <linux/kprobes.h>
14#include <linux/uaccess.h>
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/percpu.h>
18#include <linux/signal.h>
19#include <linux/errno.h>
20#include <linux/sched.h>
21#include <linux/types.h>
22#include <linux/init.h>
23#include <linux/slab.h>
24#include <linux/smp.h>
25
26#include <asm/cpu_debug.h>
27#include <asm/paravirt.h>
28#include <asm/system.h>
29#include <asm/traps.h>
30#include <asm/apic.h>
31#include <asm/desc.h>
32
33static DEFINE_PER_CPU(struct cpu_cpuX_base, cpu_arr[CPU_REG_ALL_BIT]);
34static DEFINE_PER_CPU(struct cpu_private *, priv_arr[MAX_CPU_FILES]);
35static DEFINE_PER_CPU(unsigned, cpu_modelflag);
36static DEFINE_PER_CPU(int, cpu_priv_count);
37static DEFINE_PER_CPU(unsigned, cpu_model);
38
39static DEFINE_MUTEX(cpu_debug_lock);
40
41static struct dentry *cpu_debugfs_dir;
42
43static struct cpu_debug_base cpu_base[] = {
44 { "mc", CPU_MC, 0 },
45 { "monitor", CPU_MONITOR, 0 },
46 { "time", CPU_TIME, 0 },
47 { "pmc", CPU_PMC, 1 },
48 { "platform", CPU_PLATFORM, 0 },
49 { "apic", CPU_APIC, 0 },
50 { "poweron", CPU_POWERON, 0 },
51 { "control", CPU_CONTROL, 0 },
52 { "features", CPU_FEATURES, 0 },
53 { "lastbranch", CPU_LBRANCH, 0 },
54 { "bios", CPU_BIOS, 0 },
55 { "freq", CPU_FREQ, 0 },
56 { "mtrr", CPU_MTRR, 0 },
57 { "perf", CPU_PERF, 0 },
58 { "cache", CPU_CACHE, 0 },
59 { "sysenter", CPU_SYSENTER, 0 },
60 { "therm", CPU_THERM, 0 },
61 { "misc", CPU_MISC, 0 },
62 { "debug", CPU_DEBUG, 0 },
63 { "pat", CPU_PAT, 0 },
64 { "vmx", CPU_VMX, 0 },
65 { "call", CPU_CALL, 0 },
66 { "base", CPU_BASE, 0 },
67 { "smm", CPU_SMM, 0 },
68 { "svm", CPU_SVM, 0 },
69 { "osvm", CPU_OSVM, 0 },
70 { "tss", CPU_TSS, 0 },
71 { "cr", CPU_CR, 0 },
72 { "dt", CPU_DT, 0 },
73 { "registers", CPU_REG_ALL, 0 },
74};
75
76static struct cpu_file_base cpu_file[] = {
77 { "index", CPU_REG_ALL, 0 },
78 { "value", CPU_REG_ALL, 1 },
79};
80
81/* Intel Registers Range */
82static struct cpu_debug_range cpu_intel_range[] = {
83 { 0x00000000, 0x00000001, CPU_MC, CPU_INTEL_ALL },
84 { 0x00000006, 0x00000007, CPU_MONITOR, CPU_CX_AT_XE },
85 { 0x00000010, 0x00000010, CPU_TIME, CPU_INTEL_ALL },
86 { 0x00000011, 0x00000013, CPU_PMC, CPU_INTEL_PENTIUM },
87 { 0x00000017, 0x00000017, CPU_PLATFORM, CPU_PX_CX_AT_XE },
88 { 0x0000001B, 0x0000001B, CPU_APIC, CPU_P6_CX_AT_XE },
89
90 { 0x0000002A, 0x0000002A, CPU_POWERON, CPU_PX_CX_AT_XE },
91 { 0x0000002B, 0x0000002B, CPU_POWERON, CPU_INTEL_XEON },
92 { 0x0000002C, 0x0000002C, CPU_FREQ, CPU_INTEL_XEON },
93 { 0x0000003A, 0x0000003A, CPU_CONTROL, CPU_CX_AT_XE },
94
95 { 0x00000040, 0x00000043, CPU_LBRANCH, CPU_PM_CX_AT_XE },
96 { 0x00000044, 0x00000047, CPU_LBRANCH, CPU_PM_CO_AT },
97 { 0x00000060, 0x00000063, CPU_LBRANCH, CPU_C2_AT },
98 { 0x00000064, 0x00000067, CPU_LBRANCH, CPU_INTEL_ATOM },
99
100 { 0x00000079, 0x00000079, CPU_BIOS, CPU_P6_CX_AT_XE },
101 { 0x00000088, 0x0000008A, CPU_CACHE, CPU_INTEL_P6 },
102 { 0x0000008B, 0x0000008B, CPU_BIOS, CPU_P6_CX_AT_XE },
103 { 0x0000009B, 0x0000009B, CPU_MONITOR, CPU_INTEL_XEON },
104
105 { 0x000000C1, 0x000000C2, CPU_PMC, CPU_P6_CX_AT },
106 { 0x000000CD, 0x000000CD, CPU_FREQ, CPU_CX_AT },
107 { 0x000000E7, 0x000000E8, CPU_PERF, CPU_CX_AT },
108 { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_P6_CX_XE },
109
110 { 0x00000116, 0x00000116, CPU_CACHE, CPU_INTEL_P6 },
111 { 0x00000118, 0x00000118, CPU_CACHE, CPU_INTEL_P6 },
112 { 0x00000119, 0x00000119, CPU_CACHE, CPU_INTEL_PX },
113 { 0x0000011A, 0x0000011B, CPU_CACHE, CPU_INTEL_P6 },
114 { 0x0000011E, 0x0000011E, CPU_CACHE, CPU_PX_CX_AT },
115
116 { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_P6_CX_AT_XE },
117 { 0x00000179, 0x0000017A, CPU_MC, CPU_PX_CX_AT_XE },
118 { 0x0000017B, 0x0000017B, CPU_MC, CPU_P6_XE },
119 { 0x00000186, 0x00000187, CPU_PMC, CPU_P6_CX_AT },
120 { 0x00000198, 0x00000199, CPU_PERF, CPU_PM_CX_AT_XE },
121 { 0x0000019A, 0x0000019A, CPU_TIME, CPU_PM_CX_AT_XE },
122 { 0x0000019B, 0x0000019D, CPU_THERM, CPU_PM_CX_AT_XE },
123 { 0x000001A0, 0x000001A0, CPU_MISC, CPU_PM_CX_AT_XE },
124
125 { 0x000001C9, 0x000001C9, CPU_LBRANCH, CPU_PM_CX_AT },
126 { 0x000001D7, 0x000001D8, CPU_LBRANCH, CPU_INTEL_XEON },
127 { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_CX_AT_XE },
128 { 0x000001DA, 0x000001DA, CPU_LBRANCH, CPU_INTEL_XEON },
129 { 0x000001DB, 0x000001DB, CPU_LBRANCH, CPU_P6_XE },
130 { 0x000001DC, 0x000001DC, CPU_LBRANCH, CPU_INTEL_P6 },
131 { 0x000001DD, 0x000001DE, CPU_LBRANCH, CPU_PX_CX_AT_XE },
132 { 0x000001E0, 0x000001E0, CPU_LBRANCH, CPU_INTEL_P6 },
133
134 { 0x00000200, 0x0000020F, CPU_MTRR, CPU_P6_CX_XE },
135 { 0x00000250, 0x00000250, CPU_MTRR, CPU_P6_CX_XE },
136 { 0x00000258, 0x00000259, CPU_MTRR, CPU_P6_CX_XE },
137 { 0x00000268, 0x0000026F, CPU_MTRR, CPU_P6_CX_XE },
138 { 0x00000277, 0x00000277, CPU_PAT, CPU_C2_AT_XE },
139 { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_P6_CX_XE },
140
141 { 0x00000300, 0x00000308, CPU_PMC, CPU_INTEL_XEON },
142 { 0x00000309, 0x0000030B, CPU_PMC, CPU_C2_AT_XE },
143 { 0x0000030C, 0x00000311, CPU_PMC, CPU_INTEL_XEON },
144 { 0x00000345, 0x00000345, CPU_PMC, CPU_C2_AT },
145 { 0x00000360, 0x00000371, CPU_PMC, CPU_INTEL_XEON },
146 { 0x0000038D, 0x00000390, CPU_PMC, CPU_C2_AT },
147 { 0x000003A0, 0x000003BE, CPU_PMC, CPU_INTEL_XEON },
148 { 0x000003C0, 0x000003CD, CPU_PMC, CPU_INTEL_XEON },
149 { 0x000003E0, 0x000003E1, CPU_PMC, CPU_INTEL_XEON },
150 { 0x000003F0, 0x000003F0, CPU_PMC, CPU_INTEL_XEON },
151 { 0x000003F1, 0x000003F1, CPU_PMC, CPU_C2_AT_XE },
152 { 0x000003F2, 0x000003F2, CPU_PMC, CPU_INTEL_XEON },
153
154 { 0x00000400, 0x00000402, CPU_MC, CPU_PM_CX_AT_XE },
155 { 0x00000403, 0x00000403, CPU_MC, CPU_INTEL_XEON },
156 { 0x00000404, 0x00000406, CPU_MC, CPU_PM_CX_AT_XE },
157 { 0x00000407, 0x00000407, CPU_MC, CPU_INTEL_XEON },
158 { 0x00000408, 0x0000040A, CPU_MC, CPU_PM_CX_AT_XE },
159 { 0x0000040B, 0x0000040B, CPU_MC, CPU_INTEL_XEON },
160 { 0x0000040C, 0x0000040E, CPU_MC, CPU_PM_CX_XE },
161 { 0x0000040F, 0x0000040F, CPU_MC, CPU_INTEL_XEON },
162 { 0x00000410, 0x00000412, CPU_MC, CPU_PM_CX_AT_XE },
163 { 0x00000413, 0x00000417, CPU_MC, CPU_CX_AT_XE },
164 { 0x00000480, 0x0000048B, CPU_VMX, CPU_CX_AT_XE },
165
166 { 0x00000600, 0x00000600, CPU_DEBUG, CPU_PM_CX_AT_XE },
167 { 0x00000680, 0x0000068F, CPU_LBRANCH, CPU_INTEL_XEON },
168 { 0x000006C0, 0x000006CF, CPU_LBRANCH, CPU_INTEL_XEON },
169
170 { 0x000107CC, 0x000107D3, CPU_PMC, CPU_INTEL_XEON_MP },
171
172 { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_INTEL_XEON },
173 { 0xC0000081, 0xC0000082, CPU_CALL, CPU_INTEL_XEON },
174 { 0xC0000084, 0xC0000084, CPU_CALL, CPU_INTEL_XEON },
175 { 0xC0000100, 0xC0000102, CPU_BASE, CPU_INTEL_XEON },
176};
177
178/* AMD Registers Range */
179static struct cpu_debug_range cpu_amd_range[] = {
180 { 0x00000010, 0x00000010, CPU_TIME, CPU_ALL, },
181 { 0x0000001B, 0x0000001B, CPU_APIC, CPU_ALL, },
182 { 0x000000FE, 0x000000FE, CPU_MTRR, CPU_ALL, },
183
184 { 0x00000174, 0x00000176, CPU_SYSENTER, CPU_ALL, },
185 { 0x00000179, 0x0000017A, CPU_MC, CPU_ALL, },
186 { 0x0000017B, 0x0000017B, CPU_MC, CPU_ALL, },
187 { 0x000001D9, 0x000001D9, CPU_DEBUG, CPU_ALL, },
188 { 0x000001DB, 0x000001DE, CPU_LBRANCH, CPU_ALL, },
189
190 { 0x00000200, 0x0000020F, CPU_MTRR, CPU_ALL, },
191 { 0x00000250, 0x00000250, CPU_MTRR, CPU_ALL, },
192 { 0x00000258, 0x00000259, CPU_MTRR, CPU_ALL, },
193 { 0x00000268, 0x0000026F, CPU_MTRR, CPU_ALL, },
194 { 0x00000277, 0x00000277, CPU_PAT, CPU_ALL, },
195 { 0x000002FF, 0x000002FF, CPU_MTRR, CPU_ALL, },
196
197 { 0x00000400, 0x00000417, CPU_MC, CPU_ALL, },
198
199 { 0xC0000080, 0xC0000080, CPU_FEATURES, CPU_ALL, },
200 { 0xC0000081, 0xC0000084, CPU_CALL, CPU_ALL, },
201 { 0xC0000100, 0xC0000102, CPU_BASE, CPU_ALL, },
202 { 0xC0000103, 0xC0000103, CPU_TIME, CPU_ALL, },
203
204 { 0xC0000408, 0xC000040A, CPU_MC, CPU_ALL, },
205
206 { 0xc0010000, 0xc0010007, CPU_PMC, CPU_ALL, },
207 { 0xc0010010, 0xc0010010, CPU_MTRR, CPU_ALL, },
208 { 0xc0010016, 0xc001001A, CPU_MTRR, CPU_ALL, },
209 { 0xc001001D, 0xc001001D, CPU_MTRR, CPU_ALL, },
210 { 0xc0010030, 0xc0010035, CPU_BIOS, CPU_ALL, },
211 { 0xc0010056, 0xc0010056, CPU_SMM, CPU_ALL, },
212 { 0xc0010061, 0xc0010063, CPU_SMM, CPU_ALL, },
213 { 0xc0010074, 0xc0010074, CPU_MC, CPU_ALL, },
214 { 0xc0010111, 0xc0010113, CPU_SMM, CPU_ALL, },
215 { 0xc0010114, 0xc0010118, CPU_SVM, CPU_ALL, },
216 { 0xc0010119, 0xc001011A, CPU_SMM, CPU_ALL, },
217 { 0xc0010140, 0xc0010141, CPU_OSVM, CPU_ALL, },
218 { 0xc0010156, 0xc0010156, CPU_SMM, CPU_ALL, },
219};
220
221
222static int get_cpu_modelflag(unsigned cpu)
223{
224 int flag;
225
226 switch (per_cpu(cpu_model, cpu)) {
227 /* Intel */
228 case 0x0501:
229 case 0x0502:
230 case 0x0504:
231 flag = CPU_INTEL_PENTIUM;
232 break;
233 case 0x0601:
234 case 0x0603:
235 case 0x0605:
236 case 0x0607:
237 case 0x0608:
238 case 0x060A:
239 case 0x060B:
240 flag = CPU_INTEL_P6;
241 break;
242 case 0x0609:
243 case 0x060D:
244 flag = CPU_INTEL_PENTIUM_M;
245 break;
246 case 0x060E:
247 flag = CPU_INTEL_CORE;
248 break;
249 case 0x060F:
250 case 0x0617:
251 flag = CPU_INTEL_CORE2;
252 break;
253 case 0x061C:
254 flag = CPU_INTEL_ATOM;
255 break;
256 case 0x0F00:
257 case 0x0F01:
258 case 0x0F02:
259 case 0x0F03:
260 case 0x0F04:
261 flag = CPU_INTEL_XEON_P4;
262 break;
263 case 0x0F06:
264 flag = CPU_INTEL_XEON_MP;
265 break;
266 default:
267 flag = CPU_NONE;
268 break;
269 }
270
271 return flag;
272}
273
274static int get_cpu_range_count(unsigned cpu)
275{
276 int index;
277
278 switch (per_cpu(cpu_model, cpu) >> 16) {
279 case X86_VENDOR_INTEL:
280 index = ARRAY_SIZE(cpu_intel_range);
281 break;
282 case X86_VENDOR_AMD:
283 index = ARRAY_SIZE(cpu_amd_range);
284 break;
285 default:
286 index = 0;
287 break;
288 }
289
290 return index;
291}
292
293static int is_typeflag_valid(unsigned cpu, unsigned flag)
294{
295 unsigned vendor, modelflag;
296 int i, index;
297
298 /* Standard Registers should be always valid */
299 if (flag >= CPU_TSS)
300 return 1;
301
302 modelflag = per_cpu(cpu_modelflag, cpu);
303 vendor = per_cpu(cpu_model, cpu) >> 16;
304 index = get_cpu_range_count(cpu);
305
306 for (i = 0; i < index; i++) {
307 switch (vendor) {
308 case X86_VENDOR_INTEL:
309 if ((cpu_intel_range[i].model & modelflag) &&
310 (cpu_intel_range[i].flag & flag))
311 return 1;
312 break;
313 case X86_VENDOR_AMD:
314 if (cpu_amd_range[i].flag & flag)
315 return 1;
316 break;
317 }
318 }
319
320 /* Invalid */
321 return 0;
322}
323
324static unsigned get_cpu_range(unsigned cpu, unsigned *min, unsigned *max,
325 int index, unsigned flag)
326{
327 unsigned modelflag;
328
329 modelflag = per_cpu(cpu_modelflag, cpu);
330 *max = 0;
331 switch (per_cpu(cpu_model, cpu) >> 16) {
332 case X86_VENDOR_INTEL:
333 if ((cpu_intel_range[index].model & modelflag) &&
334 (cpu_intel_range[index].flag & flag)) {
335 *min = cpu_intel_range[index].min;
336 *max = cpu_intel_range[index].max;
337 }
338 break;
339 case X86_VENDOR_AMD:
340 if (cpu_amd_range[index].flag & flag) {
341 *min = cpu_amd_range[index].min;
342 *max = cpu_amd_range[index].max;
343 }
344 break;
345 }
346
347 return *max;
348}
349
350/* This function can also be called with seq = NULL for printk */
351static void print_cpu_data(struct seq_file *seq, unsigned type,
352 u32 low, u32 high)
353{
354 struct cpu_private *priv;
355 u64 val = high;
356
357 if (seq) {
358 priv = seq->private;
359 if (priv->file) {
360 val = (val << 32) | low;
361 seq_printf(seq, "0x%llx\n", val);
362 } else
363 seq_printf(seq, " %08x: %08x_%08x\n",
364 type, high, low);
365 } else
366 printk(KERN_INFO " %08x: %08x_%08x\n", type, high, low);
367}
368
369/* This function can also be called with seq = NULL for printk */
370static void print_msr(struct seq_file *seq, unsigned cpu, unsigned flag)
371{
372 unsigned msr, msr_min, msr_max;
373 struct cpu_private *priv;
374 u32 low, high;
375 int i, range;
376
377 if (seq) {
378 priv = seq->private;
379 if (priv->file) {
380 if (!rdmsr_safe_on_cpu(priv->cpu, priv->reg,
381 &low, &high))
382 print_cpu_data(seq, priv->reg, low, high);
383 return;
384 }
385 }
386
387 range = get_cpu_range_count(cpu);
388
389 for (i = 0; i < range; i++) {
390 if (!get_cpu_range(cpu, &msr_min, &msr_max, i, flag))
391 continue;
392
393 for (msr = msr_min; msr <= msr_max; msr++) {
394 if (rdmsr_safe_on_cpu(cpu, msr, &low, &high))
395 continue;
396 print_cpu_data(seq, msr, low, high);
397 }
398 }
399}
400
401static void print_tss(void *arg)
402{
403 struct pt_regs *regs = task_pt_regs(current);
404 struct seq_file *seq = arg;
405 unsigned int seg;
406
407 seq_printf(seq, " RAX\t: %016lx\n", regs->ax);
408 seq_printf(seq, " RBX\t: %016lx\n", regs->bx);
409 seq_printf(seq, " RCX\t: %016lx\n", regs->cx);
410 seq_printf(seq, " RDX\t: %016lx\n", regs->dx);
411
412 seq_printf(seq, " RSI\t: %016lx\n", regs->si);
413 seq_printf(seq, " RDI\t: %016lx\n", regs->di);
414 seq_printf(seq, " RBP\t: %016lx\n", regs->bp);
415 seq_printf(seq, " ESP\t: %016lx\n", regs->sp);
416
417#ifdef CONFIG_X86_64
418 seq_printf(seq, " R08\t: %016lx\n", regs->r8);
419 seq_printf(seq, " R09\t: %016lx\n", regs->r9);
420 seq_printf(seq, " R10\t: %016lx\n", regs->r10);
421 seq_printf(seq, " R11\t: %016lx\n", regs->r11);
422 seq_printf(seq, " R12\t: %016lx\n", regs->r12);
423 seq_printf(seq, " R13\t: %016lx\n", regs->r13);
424 seq_printf(seq, " R14\t: %016lx\n", regs->r14);
425 seq_printf(seq, " R15\t: %016lx\n", regs->r15);
426#endif
427
428 asm("movl %%cs,%0" : "=r" (seg));
429 seq_printf(seq, " CS\t: %04x\n", seg);
430 asm("movl %%ds,%0" : "=r" (seg));
431 seq_printf(seq, " DS\t: %04x\n", seg);
432 seq_printf(seq, " SS\t: %04lx\n", regs->ss & 0xffff);
433 asm("movl %%es,%0" : "=r" (seg));
434 seq_printf(seq, " ES\t: %04x\n", seg);
435 asm("movl %%fs,%0" : "=r" (seg));
436 seq_printf(seq, " FS\t: %04x\n", seg);
437 asm("movl %%gs,%0" : "=r" (seg));
438 seq_printf(seq, " GS\t: %04x\n", seg);
439
440 seq_printf(seq, " EFLAGS\t: %016lx\n", regs->flags);
441
442 seq_printf(seq, " EIP\t: %016lx\n", regs->ip);
443}
444
445static void print_cr(void *arg)
446{
447 struct seq_file *seq = arg;
448
449 seq_printf(seq, " cr0\t: %016lx\n", read_cr0());
450 seq_printf(seq, " cr2\t: %016lx\n", read_cr2());
451 seq_printf(seq, " cr3\t: %016lx\n", read_cr3());
452 seq_printf(seq, " cr4\t: %016lx\n", read_cr4_safe());
453#ifdef CONFIG_X86_64
454 seq_printf(seq, " cr8\t: %016lx\n", read_cr8());
455#endif
456}
457
458static void print_desc_ptr(char *str, struct seq_file *seq, struct desc_ptr dt)
459{
460 seq_printf(seq, " %s\t: %016llx\n", str, (u64)(dt.address | dt.size));
461}
462
463static void print_dt(void *seq)
464{
465 struct desc_ptr dt;
466 unsigned long ldt;
467
468 /* IDT */
469 store_idt((struct desc_ptr *)&dt);
470 print_desc_ptr("IDT", seq, dt);
471
472 /* GDT */
473 store_gdt((struct desc_ptr *)&dt);
474 print_desc_ptr("GDT", seq, dt);
475
476 /* LDT */
477 store_ldt(ldt);
478 seq_printf(seq, " LDT\t: %016lx\n", ldt);
479
480 /* TR */
481 store_tr(ldt);
482 seq_printf(seq, " TR\t: %016lx\n", ldt);
483}
484
485static void print_dr(void *arg)
486{
487 struct seq_file *seq = arg;
488 unsigned long dr;
489 int i;
490
491 for (i = 0; i < 8; i++) {
492 /* Ignore db4, db5 */
493 if ((i == 4) || (i == 5))
494 continue;
495 get_debugreg(dr, i);
496 seq_printf(seq, " dr%d\t: %016lx\n", i, dr);
497 }
498
499 seq_printf(seq, "\n MSR\t:\n");
500}
501
502static void print_apic(void *arg)
503{
504 struct seq_file *seq = arg;
505
506#ifdef CONFIG_X86_LOCAL_APIC
507 seq_printf(seq, " LAPIC\t:\n");
508 seq_printf(seq, " ID\t\t: %08x\n", apic_read(APIC_ID) >> 24);
509 seq_printf(seq, " LVR\t\t: %08x\n", apic_read(APIC_LVR));
510 seq_printf(seq, " TASKPRI\t: %08x\n", apic_read(APIC_TASKPRI));
511 seq_printf(seq, " ARBPRI\t\t: %08x\n", apic_read(APIC_ARBPRI));
512 seq_printf(seq, " PROCPRI\t: %08x\n", apic_read(APIC_PROCPRI));
513 seq_printf(seq, " LDR\t\t: %08x\n", apic_read(APIC_LDR));
514 seq_printf(seq, " DFR\t\t: %08x\n", apic_read(APIC_DFR));
515 seq_printf(seq, " SPIV\t\t: %08x\n", apic_read(APIC_SPIV));
516 seq_printf(seq, " ISR\t\t: %08x\n", apic_read(APIC_ISR));
517 seq_printf(seq, " ESR\t\t: %08x\n", apic_read(APIC_ESR));
518 seq_printf(seq, " ICR\t\t: %08x\n", apic_read(APIC_ICR));
519 seq_printf(seq, " ICR2\t\t: %08x\n", apic_read(APIC_ICR2));
520 seq_printf(seq, " LVTT\t\t: %08x\n", apic_read(APIC_LVTT));
521 seq_printf(seq, " LVTTHMR\t: %08x\n", apic_read(APIC_LVTTHMR));
522 seq_printf(seq, " LVTPC\t\t: %08x\n", apic_read(APIC_LVTPC));
523 seq_printf(seq, " LVT0\t\t: %08x\n", apic_read(APIC_LVT0));
524 seq_printf(seq, " LVT1\t\t: %08x\n", apic_read(APIC_LVT1));
525 seq_printf(seq, " LVTERR\t\t: %08x\n", apic_read(APIC_LVTERR));
526 seq_printf(seq, " TMICT\t\t: %08x\n", apic_read(APIC_TMICT));
527 seq_printf(seq, " TMCCT\t\t: %08x\n", apic_read(APIC_TMCCT));
528 seq_printf(seq, " TDCR\t\t: %08x\n", apic_read(APIC_TDCR));
529#endif /* CONFIG_X86_LOCAL_APIC */
530
531 seq_printf(seq, "\n MSR\t:\n");
532}
533
534static int cpu_seq_show(struct seq_file *seq, void *v)
535{
536 struct cpu_private *priv = seq->private;
537
538 if (priv == NULL)
539 return -EINVAL;
540
541 switch (cpu_base[priv->type].flag) {
542 case CPU_TSS:
543 smp_call_function_single(priv->cpu, print_tss, seq, 1);
544 break;
545 case CPU_CR:
546 smp_call_function_single(priv->cpu, print_cr, seq, 1);
547 break;
548 case CPU_DT:
549 smp_call_function_single(priv->cpu, print_dt, seq, 1);
550 break;
551 case CPU_DEBUG:
552 if (priv->file == CPU_INDEX_BIT)
553 smp_call_function_single(priv->cpu, print_dr, seq, 1);
554 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
555 break;
556 case CPU_APIC:
557 if (priv->file == CPU_INDEX_BIT)
558 smp_call_function_single(priv->cpu, print_apic, seq, 1);
559 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
560 break;
561
562 default:
563 print_msr(seq, priv->cpu, cpu_base[priv->type].flag);
564 break;
565 }
566 seq_printf(seq, "\n");
567
568 return 0;
569}
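/*
 * All of the printers above are dispatched through
 * smp_call_function_single() with wait == 1: TSS, control,
 * descriptor-table, debug and APIC registers are strictly per-CPU
 * state, so the dump must execute on the CPU being inspected, and the
 * caller blocks until that CPU has filled the seq buffer.  For the
 * CPU_DEBUG and CPU_APIC index files the matching MSR dump is then
 * appended via print_msr(), which is why print_dr() and print_apic()
 * both end by emitting the "MSR" header.
 */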
570
571static void *cpu_seq_start(struct seq_file *seq, loff_t *pos)
572{
573 if (*pos == 0) /* One time is enough ;-) */
574 return seq;
575
576 return NULL;
577}
578
579static void *cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
580{
581 (*pos)++;
582
583 return cpu_seq_start(seq, pos);
584}
585
586static void cpu_seq_stop(struct seq_file *seq, void *v)
587{
588}
589
590static const struct seq_operations cpu_seq_ops = {
591 .start = cpu_seq_start,
592 .next = cpu_seq_next,
593 .stop = cpu_seq_stop,
594 .show = cpu_seq_show,
595};
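/*
 * Single-record seq_file idiom: cpu_seq_start() returns a non-NULL
 * token only while *pos == 0, and cpu_seq_next() bumps *pos before
 * retrying, so cpu_seq_show() runs exactly once per read and the
 * second iteration terminates immediately.
 */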
596
597static int cpu_seq_open(struct inode *inode, struct file *file)
598{
599 struct cpu_private *priv = inode->i_private;
600 struct seq_file *seq;
601 int err;
602
603 err = seq_open(file, &cpu_seq_ops);
604 if (!err) {
605 seq = file->private_data;
606 seq->private = priv;
607 }
608
609 return err;
610}
611
612static int write_msr(struct cpu_private *priv, u64 val)
613{
614 u32 low, high;
615
616 high = (val >> 32) & 0xffffffff;
617 low = val & 0xffffffff;
618
619 if (!wrmsr_safe_on_cpu(priv->cpu, priv->reg, low, high))
620 return 0;
621
622 return -EPERM;
623}
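/*
 * wrmsr_safe_on_cpu() takes the 64-bit MSR value as separate low/high
 * 32-bit halves and returns 0 on success; any failure is collapsed to
 * -EPERM here, which cpu_write() in turn reports as -EACCES.
 */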
624
625static int write_cpu_register(struct cpu_private *priv, const char *buf)
626{
627	int ret;
628 u64 val;
629
630 ret = strict_strtoull(buf, 0, &val);
631 if (ret < 0)
632 return ret;
633
634 /* Supporting only MSRs */
635 if (priv->type < CPU_TSS_BIT)
636 return write_msr(priv, val);
637
638	return -EPERM;
639}
640
641static ssize_t cpu_write(struct file *file, const char __user *ubuf,
642 size_t count, loff_t *off)
643{
644 struct seq_file *seq = file->private_data;
645 struct cpu_private *priv = seq->private;
646 char buf[19];
647
648 if ((priv == NULL) || (count >= sizeof(buf)))
649 return -EINVAL;
650
651	if (copy_from_user(buf, ubuf, count))
652 return -EFAULT;
653
654 buf[count] = 0;
655
656 if ((cpu_base[priv->type].write) && (cpu_file[priv->file].write))
657 if (!write_cpu_register(priv, buf))
658 return count;
659
660 return -EACCES;
661}
662
663static const struct file_operations cpu_fops = {
664 .owner = THIS_MODULE,
665 .open = cpu_seq_open,
666 .read = seq_read,
667 .write = cpu_write,
668 .llseek = seq_lseek,
669 .release = seq_release,
670};
671
672static int cpu_create_file(unsigned cpu, unsigned type, unsigned reg,
673 unsigned file, struct dentry *dentry)
674{
675 struct cpu_private *priv = NULL;
676
677	/* Already initialized */
678 if (file == CPU_INDEX_BIT)
679 if (per_cpu(cpu_arr[type].init, cpu))
680 return 0;
681
682 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
683 if (priv == NULL)
684 return -ENOMEM;
685
686 priv->cpu = cpu;
687 priv->type = type;
688 priv->reg = reg;
689 priv->file = file;
690 mutex_lock(&cpu_debug_lock);
691 per_cpu(priv_arr[type], cpu) = priv;
692 per_cpu(cpu_priv_count, cpu)++;
693 mutex_unlock(&cpu_debug_lock);
694
695	if (file) {
696		debugfs_create_file(cpu_file[file].name, S_IRUGO,
697				    dentry, (void *)priv, &cpu_fops);
698	} else {
699		debugfs_create_file(cpu_base[type].name, S_IRUGO,
700				    per_cpu(cpu_arr[type].dentry, cpu),
701				    (void *)priv, &cpu_fops);
702		mutex_lock(&cpu_debug_lock);
703		per_cpu(cpu_arr[type].init, cpu) = 1;
704		mutex_unlock(&cpu_debug_lock);
705	}
706
707 return 0;
708}
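/*
 * Ownership note: every debugfs file created here carries a
 * kzalloc()ed struct cpu_private as its private data, tracked in
 * per_cpu(priv_arr) under cpu_debug_lock so that cpu_debug_exit() can
 * kfree() each one after debugfs_remove_recursive() has torn the
 * tree down.
 */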
709
710static int cpu_init_regfiles(unsigned cpu, unsigned int type, unsigned reg,
711 struct dentry *dentry)
712{
713 unsigned file;
714 int err = 0;
715
716 for (file = 0; file < ARRAY_SIZE(cpu_file); file++) {
717 err = cpu_create_file(cpu, type, reg, file, dentry);
718 if (err)
719 return err;
720 }
721
722 return err;
723}
724
725static int cpu_init_msr(unsigned cpu, unsigned type, struct dentry *dentry)
726{
727 struct dentry *cpu_dentry = NULL;
728 unsigned reg, reg_min, reg_max;
729 int i, range, err = 0;
730 char reg_dir[12];
731 u32 low, high;
732
733 range = get_cpu_range_count(cpu);
734
735 for (i = 0; i < range; i++) {
736 if (!get_cpu_range(cpu, &reg_min, &reg_max, i,
737 cpu_base[type].flag))
738 continue;
739
740 for (reg = reg_min; reg <= reg_max; reg++) {
741 if (rdmsr_safe_on_cpu(cpu, reg, &low, &high))
742 continue;
743
744 sprintf(reg_dir, "0x%x", reg);
745 cpu_dentry = debugfs_create_dir(reg_dir, dentry);
746 err = cpu_init_regfiles(cpu, type, reg, cpu_dentry);
747 if (err)
748 return err;
749 }
750 }
751
752 return err;
753}
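/*
 * The rdmsr_safe_on_cpu() probe above doubles as a filter: MSRs in the
 * advertised range that fault on read are silently skipped, so only
 * registers that actually exist on this CPU get a 0x%x-named
 * directory.
 */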
754
755static int cpu_init_allreg(unsigned cpu, struct dentry *dentry)
756{
757 struct dentry *cpu_dentry = NULL;
758 unsigned type;
759 int err = 0;
760
761 for (type = 0; type < ARRAY_SIZE(cpu_base) - 1; type++) {
762 if (!is_typeflag_valid(cpu, cpu_base[type].flag))
763 continue;
764 cpu_dentry = debugfs_create_dir(cpu_base[type].name, dentry);
765 per_cpu(cpu_arr[type].dentry, cpu) = cpu_dentry;
766
767 if (type < CPU_TSS_BIT)
768 err = cpu_init_msr(cpu, type, cpu_dentry);
769 else
770 err = cpu_create_file(cpu, type, 0, CPU_INDEX_BIT,
771 cpu_dentry);
772 if (err)
773 return err;
774 }
775
776 return err;
777}
778
779static int cpu_init_cpu(void)
780{
781 struct dentry *cpu_dentry = NULL;
782 struct cpuinfo_x86 *cpui;
783 char cpu_dir[12];
784 unsigned cpu;
785 int err = 0;
786
787 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
788 cpui = &cpu_data(cpu);
789 if (!cpu_has(cpui, X86_FEATURE_MSR))
790 continue;
791 per_cpu(cpu_model, cpu) = ((cpui->x86_vendor << 16) |
792 (cpui->x86 << 8) |
793 (cpui->x86_model));
794 per_cpu(cpu_modelflag, cpu) = get_cpu_modelflag(cpu);
795
796 sprintf(cpu_dir, "cpu%d", cpu);
797 cpu_dentry = debugfs_create_dir(cpu_dir, cpu_debugfs_dir);
798 err = cpu_init_allreg(cpu, cpu_dentry);
799
800 pr_info("cpu%d(%d) debug files %d\n",
801 cpu, nr_cpu_ids, per_cpu(cpu_priv_count, cpu));
802 if (per_cpu(cpu_priv_count, cpu) > MAX_CPU_FILES) {
803 pr_err("Register files count %d exceeds limit %d\n",
804 per_cpu(cpu_priv_count, cpu), MAX_CPU_FILES);
805 per_cpu(cpu_priv_count, cpu) = MAX_CPU_FILES;
806 err = -ENFILE;
807 }
808 if (err)
809 return err;
810 }
811
812 return err;
813}
814
815static int __init cpu_debug_init(void)
816{
817 cpu_debugfs_dir = debugfs_create_dir("cpu", arch_debugfs_dir);
818
819 return cpu_init_cpu();
820}
821
822static void __exit cpu_debug_exit(void)
823{
824 int i, cpu;
825
826 if (cpu_debugfs_dir)
827 debugfs_remove_recursive(cpu_debugfs_dir);
828
829 for (cpu = 0; cpu < nr_cpu_ids; cpu++)
830 for (i = 0; i < per_cpu(cpu_priv_count, cpu); i++)
831 kfree(per_cpu(priv_arr[i], cpu));
832}
833
834module_init(cpu_debug_init);
835module_exit(cpu_debug_exit);
836
837MODULE_AUTHOR("Jaswinder Singh Rajput");
838MODULE_DESCRIPTION("CPU Debug module");
839MODULE_LICENSE("GPL");
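/*
 * Usage sketch (assumptions flagged: debugfs mounted at the usual
 * /sys/kernel/debug, and <group>/<file> standing in for entries whose
 * real names come from the cpu_base[] and cpu_file[] tables defined
 * earlier in this file):
 *
 *   # ls /sys/kernel/debug/x86/cpu/cpu0/
 *   # cat /sys/kernel/debug/x86/cpu/cpu0/<group>/<file>
 *   # echo 0x10 > /sys/kernel/debug/x86/cpu/cpu0/<group>/0x<msr>/<file>
 *
 * Reads go through the seq_file machinery above; writes are honoured
 * only for MSR-type entries whose cpu_base[] and cpu_file[] slots both
 * set .write, and anything else fails with -EACCES.
 */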
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index ffd0f5ed071..593171e967e 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -61,23 +61,23 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
61 */ 61 */
62static unsigned char Cx86_dir0_msb __cpuinitdata = 0; 62static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
63 63
64static char Cx86_model[][9] __cpuinitdata = { 64static const char __cpuinitconst Cx86_model[][9] = {
65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", 65 "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
66 "M II ", "Unknown" 66 "M II ", "Unknown"
67}; 67};
68static char Cx486_name[][5] __cpuinitdata = { 68static const char __cpuinitconst Cx486_name[][5] = {
69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", 69 "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
70 "SRx2", "DRx2" 70 "SRx2", "DRx2"
71}; 71};
72static char Cx486S_name[][4] __cpuinitdata = { 72static const char __cpuinitconst Cx486S_name[][4] = {
73 "S", "S2", "Se", "S2e" 73 "S", "S2", "Se", "S2e"
74}; 74};
75static char Cx486D_name[][4] __cpuinitdata = { 75static const char __cpuinitconst Cx486D_name[][4] = {
76 "DX", "DX2", "?", "?", "?", "DX4" 76 "DX", "DX2", "?", "?", "?", "DX4"
77}; 77};
78static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock"; 78static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
79static char cyrix_model_mult1[] __cpuinitdata = "12??43"; 79static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
80static char cyrix_model_mult2[] __cpuinitdata = "12233445"; 80static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
81 81
82/* 82/*
83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old 83 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -435,7 +435,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
435 } 435 }
436} 436}
437 437
438static struct cpu_dev cyrix_cpu_dev __cpuinitdata = { 438static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
439 .c_vendor = "Cyrix", 439 .c_vendor = "Cyrix",
440 .c_ident = { "CyrixInstead" }, 440 .c_ident = { "CyrixInstead" },
441 .c_early_init = early_init_cyrix, 441 .c_early_init = early_init_cyrix,
@@ -446,7 +446,7 @@ static struct cpu_dev cyrix_cpu_dev __cpuinitdata = {
446 446
447cpu_dev_register(cyrix_cpu_dev); 447cpu_dev_register(cyrix_cpu_dev);
448 448
449static struct cpu_dev nsc_cpu_dev __cpuinitdata = { 449static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
450 .c_vendor = "NSC", 450 .c_vendor = "NSC",
451 .c_ident = { "Geode by NSC" }, 451 .c_ident = { "Geode by NSC" },
452 .c_init = init_nsc, 452 .c_init = init_nsc,
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index ae769471042..b09d4eb52bb 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -415,7 +415,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
415} 415}
416#endif 416#endif
417 417
418static struct cpu_dev intel_cpu_dev __cpuinitdata = { 418static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
419 .c_vendor = "Intel", 419 .c_vendor = "Intel",
420 .c_ident = { "GenuineIntel" }, 420 .c_ident = { "GenuineIntel" },
421#ifdef CONFIG_X86_32 421#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 7293508d8f5..c471eb1a389 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -32,7 +32,7 @@ struct _cache_table
32}; 32};
33 33
34/* all the cache descriptor types we care about (no TLB or trace cache entries) */ 34/* all the cache descriptor types we care about (no TLB or trace cache entries) */
35static struct _cache_table cache_table[] __cpuinitdata = 35static const struct _cache_table __cpuinitconst cache_table[] =
36{ 36{
37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ 37 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ 38 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
@@ -206,15 +206,15 @@ union l3_cache {
206 unsigned val; 206 unsigned val;
207}; 207};
208 208
209static unsigned short assocs[] __cpuinitdata = { 209static const unsigned short __cpuinitconst assocs[] = {
210 [1] = 1, [2] = 2, [4] = 4, [6] = 8, 210 [1] = 1, [2] = 2, [4] = 4, [6] = 8,
211 [8] = 16, [0xa] = 32, [0xb] = 48, 211 [8] = 16, [0xa] = 32, [0xb] = 48,
212 [0xc] = 64, 212 [0xc] = 64,
213 [0xf] = 0xffff // ?? 213 [0xf] = 0xffff // ??
214}; 214};
215 215
216static unsigned char levels[] __cpuinitdata = { 1, 1, 2, 3 }; 216static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
217static unsigned char types[] __cpuinitdata = { 1, 2, 3, 3 }; 217static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
218 218
219static void __cpuinit 219static void __cpuinit
220amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, 220amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 52b3fefbd5a..bb62b3e5caa 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
98#endif 98#endif
99} 99}
100 100
101static struct cpu_dev transmeta_cpu_dev __cpuinitdata = { 101static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
102 .c_vendor = "Transmeta", 102 .c_vendor = "Transmeta",
103 .c_ident = { "GenuineTMx86", "TransmetaCPU" }, 103 .c_ident = { "GenuineTMx86", "TransmetaCPU" },
104 .c_early_init = early_init_transmeta, 104 .c_early_init = early_init_transmeta,
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index e777f79e096..fd2c37bf7ac 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -8,7 +8,7 @@
8 * so no special init takes place. 8 * so no special init takes place.
9 */ 9 */
10 10
11static struct cpu_dev umc_cpu_dev __cpuinitdata = { 11static const struct cpu_dev __cpuinitconst umc_cpu_dev = {
12 .c_vendor = "UMC", 12 .c_vendor = "UMC",
13 .c_ident = { "UMC UMC UMC" }, 13 .c_ident = { "UMC UMC UMC" },
14 .c_models = { 14 .c_models = {