author	Yinghai Lu <yhlu.kernel@gmail.com>	2008-09-04 15:09:47 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-09-04 15:09:47 -0400
commit	0a488a53d7ca46ac638c30079072c57e50cfcc7b (patch)
tree	a2fab0de7f85019c98c348a93c275f243e2a0392
parent	01b2e16a7a9be6573cba5d594d6659b3c6cb46a0 (diff)
x86: move 32bit related functions together
Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	arch/x86/kernel/cpu/common.c	251
-rw-r--r--	arch/x86/kernel/cpu/common_64.c	34
2 files changed, 143 insertions(+), 142 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index a79cf5c52b6a..2b2c170e8d69 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -22,6 +22,8 @@
 
 #include "cpu.h"
 
+static struct cpu_dev *this_cpu __cpuinitdata;
+
 DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
 	[GDT_ENTRY_KERNEL_CS] = { { { 0x0000ffff, 0x00cf9a00 } } },
 	[GDT_ENTRY_KERNEL_DS] = { { { 0x0000ffff, 0x00cf9200 } } },
@@ -58,6 +60,109 @@ DEFINE_PER_CPU(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
+static int cachesize_override __cpuinitdata = -1;
+static int disable_x86_serial_nr __cpuinitdata = 1;
+
+static int __init cachesize_setup(char *str)
+{
+	get_option(&str, &cachesize_override);
+	return 1;
+}
+__setup("cachesize=", cachesize_setup);
+
+/*
+ * Naming convention should be: <Name> [(<Codename>)]
+ * This table only is used unless init_<vendor>() below doesn't set it;
+ * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
+ *
+ */
+
+/* Look up CPU names by table lookup. */
+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
+{
+	struct cpu_model_info *info;
+
+	if (c->x86_model >= 16)
+		return NULL;	/* Range check */
+
+	if (!this_cpu)
+		return NULL;
+
+	info = this_cpu->c_models;
+
+	while (info && info->family) {
+		if (info->family == c->x86)
+			return info->model_names[c->x86_model];
+		info++;
+	}
+	return NULL;		/* Not found */
+}
+
+static int __init x86_fxsr_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_FXSR);
+	setup_clear_cpu_cap(X86_FEATURE_XMM);
+	return 1;
+}
+__setup("nofxsr", x86_fxsr_setup);
+
+static int __init x86_sep_setup(char *s)
+{
+	setup_clear_cpu_cap(X86_FEATURE_SEP);
+	return 1;
+}
+__setup("nosep", x86_sep_setup);
+
+/* Standard macro to see if a specific flag is changeable */
+static inline int flag_is_changeable_p(u32 flag)
+{
+	u32 f1, f2;
+
+	asm("pushfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "movl %0,%1\n\t"
+	    "xorl %2,%0\n\t"
+	    "pushl %0\n\t"
+	    "popfl\n\t"
+	    "pushfl\n\t"
+	    "popl %0\n\t"
+	    "popfl\n\t"
+	    : "=&r" (f1), "=&r" (f2)
+	    : "ir" (flag));
+
+	return ((f1^f2) & flag) != 0;
+}
+
+/* Probe for the CPUID instruction */
+static int __cpuinit have_cpuid_p(void)
+{
+	return flag_is_changeable_p(X86_EFLAGS_ID);
+}
+
+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+{
+	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
+		/* Disable processor serial number */
+		unsigned long lo, hi;
+		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+		lo |= 0x200000;
+		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
+		printk(KERN_NOTICE "CPU serial number disabled.\n");
+		clear_cpu_cap(c, X86_FEATURE_PN);
+
+		/* Disabling the serial number may affect the cpuid level */
+		c->cpuid_level = cpuid_eax(0);
+	}
+}
+
+static int __init x86_serial_nr_setup(char *s)
+{
+	disable_x86_serial_nr = 0;
+	return 1;
+}
+__setup("serialnumber", x86_serial_nr_setup);
+
 __u32 cleared_cpu_caps[NCAPINTS] __cpuinitdata;
 
 /* Current gdt points %fs at the "master" per-cpu area: after this,
@@ -72,9 +177,6 @@ void switch_to_new_gdt(void)
 	asm("mov %0, %%fs" : : "r" (__KERNEL_PERCPU) : "memory");
 }
 
-static int cachesize_override __cpuinitdata = -1;
-static int disable_x86_serial_nr __cpuinitdata = 1;
-
 static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
 static void __cpuinit default_init(struct cpuinfo_x86 *c)
@@ -95,14 +197,6 @@ static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_vendor = "Unknown",
 	.c_x86_vendor = X86_VENDOR_UNKNOWN,
 };
-static struct cpu_dev *this_cpu __cpuinitdata;
-
-static int __init cachesize_setup(char *str)
-{
-	get_option(&str, &cachesize_override);
-	return 1;
-}
-__setup("cachesize=", cachesize_setup);
 
 int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
@@ -133,7 +227,6 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	return 1;
 }
 
-
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ebx, ecx, edx, l2size;
@@ -150,7 +243,7 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 	if (n < 0x80000006)	/* Some chips just has a large L1. */
 		return;
 
-	ecx = cpuid_ecx(0x80000006);
+	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
 	l2size = ecx >> 16;
 
 	/* do processor-specific cache resizing */
@@ -167,48 +260,23 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 	c->x86_cache_size = l2size;
 
 	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
 	       l2size, ecx & 0xFF);
-}
-
-/*
- * Naming convention should be: <Name> [(<Codename>)]
- * This table only is used unless init_<vendor>() below doesn't set it;
- * in particular, if CPUID levels 0x80000002..4 are supported, this isn't used
- *
- */
-
-/* Look up CPU names by table lookup. */
-static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
-{
-	struct cpu_model_info *info;
-
-	if (c->x86_model >= 16)
-		return NULL;	/* Range check */
-
-	if (!this_cpu)
-		return NULL;
-
-	info = this_cpu->c_models;
-
-	while (info && info->family) {
-		if (info->family == c->x86)
-			return info->model_names[c->x86_model];
-		info++;
-	}
-	return NULL;		/* Not found */
 }
 
 #ifdef CONFIG_X86_HT
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
-
-	cpuid(1, &eax, &ebx, &ecx, &edx);
 
-	if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
+	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
 
+	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
+		goto out;
+
+	cpuid(1, &eax, &ebx, &ecx, &edx);
+
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {
@@ -225,8 +293,6 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	index_msb = get_count_order(smp_num_siblings);
 	c->phys_proc_id = phys_pkg_id(c->initial_apicid, index_msb);
 
-	printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
-	       c->phys_proc_id);
 
 	smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
@@ -236,10 +302,14 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 
 		c->cpu_core_id = phys_pkg_id(c->initial_apicid, index_msb) &
 					       ((1 << core_bits) - 1);
+	}
 
-	if (c->x86_max_cores > 1)
-		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
-		       c->cpu_core_id);
+out:
+	if ((c->x86_max_cores * smp_num_siblings) > 1) {
+		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
+		       c->phys_proc_id);
+		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
+		       c->cpu_core_id);
 	}
 }
 #endif
@@ -273,52 +343,6 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 	this_cpu = &default_cpu;
 }
 
-
-static int __init x86_fxsr_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_FXSR);
-	setup_clear_cpu_cap(X86_FEATURE_XMM);
-	return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
-
-static int __init x86_sep_setup(char *s)
-{
-	setup_clear_cpu_cap(X86_FEATURE_SEP);
-	return 1;
-}
-__setup("nosep", x86_sep_setup);
-
-
-/* Standard macro to see if a specific flag is changeable */
-static inline int flag_is_changeable_p(u32 flag)
-{
-	u32 f1, f2;
-
-	asm("pushfl\n\t"
-	    "pushfl\n\t"
-	    "popl %0\n\t"
-	    "movl %0,%1\n\t"
-	    "xorl %2,%0\n\t"
-	    "pushl %0\n\t"
-	    "popfl\n\t"
-	    "pushfl\n\t"
-	    "popl %0\n\t"
-	    "popfl\n\t"
-	    : "=&r" (f1), "=&r" (f2)
-	    : "ir" (flag));
-
-	return ((f1^f2) & flag) != 0;
-}
-
-
-/* Probe for the CPUID instruction */
-static int __cpuinit have_cpuid_p(void)
-{
-	return flag_is_changeable_p(X86_EFLAGS_ID);
-}
-
 void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
@@ -380,16 +404,16 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
  */
 static void __init early_identify_cpu(struct cpuinfo_x86 *c)
 {
-	c->x86_cache_alignment = 32;
 	c->x86_clflush_size = 32;
+	c->x86_cache_alignment = c->x86_clflush_size;
 
 	if (!have_cpuid_p())
 		return;
 
-	c->extended_cpuid_level = 0;
-
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
+	c->extended_cpuid_level = 0;
+
 	cpu_detect(c);
 
 	get_cpu_vendor(c);
@@ -487,31 +511,6 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 	detect_nopl(c);
 }
 
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
-{
-	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr) {
-		/* Disable processor serial number */
-		unsigned long lo, hi;
-		rdmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		lo |= 0x200000;
-		wrmsr(MSR_IA32_BBL_CR_CTL, lo, hi);
-		printk(KERN_NOTICE "CPU serial number disabled.\n");
-		clear_cpu_cap(c, X86_FEATURE_PN);
-
-		/* Disabling the serial number may affect the cpuid level */
-		c->cpuid_level = cpuid_eax(0);
-	}
-}
-
-static int __init x86_serial_nr_setup(char *s)
-{
-	disable_x86_serial_nr = 0;
-	return 1;
-}
-__setup("serialnumber", x86_serial_nr_setup);
-
-
-
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
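
A note on the probe this patch moves to the top of common.c: flag_is_changeable_p() tests whether the ID bit of EFLAGS (bit 21) can be toggled, which on 32-bit x86 is the architectural way to detect the CPUID instruction, since a 386 keeps the bit fixed. Below is a minimal user-space sketch of the same trick, assuming a 32-bit build (gcc -m32); the kernel constant X86_EFLAGS_ID is written out here as 0x00200000.

/* Sketch of the EFLAGS.ID probe used by flag_is_changeable_p()/have_cpuid_p().
 * Assumes a 32-bit x86 build (gcc -m32); 0x00200000 is EFLAGS bit 21 (ID). */
#include <stdio.h>

static int eflag_is_changeable(unsigned int flag)
{
	unsigned int f1, f2;

	asm("pushfl\n\t"		/* save original EFLAGS */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* f1 = current EFLAGS */
	    "movl %0,%1\n\t"		/* f2 = f1 */
	    "xorl %2,%0\n\t"		/* toggle the requested bit */
	    "pushl %0\n\t"
	    "popfl\n\t"			/* try to write it back */
	    "pushfl\n\t"
	    "popl %0\n\t"		/* f1 = EFLAGS as the CPU kept it */
	    "popfl\n\t"			/* restore original EFLAGS */
	    : "=&r" (f1), "=&r" (f2)
	    : "ir" (flag));

	return ((f1 ^ f2) & flag) != 0;	/* bit stuck => not changeable */
}

int main(void)
{
	/* On a 386 the ID bit is not writable, so CPUID is absent. */
	printf("CPUID %s\n",
	       eflag_is_changeable(0x00200000) ? "present" : "absent");
	return 0;
}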
diff --git a/arch/x86/kernel/cpu/common_64.c b/arch/x86/kernel/cpu/common_64.c
index f1fb94e766bb..b2da04135326 100644
--- a/arch/x86/kernel/cpu/common_64.c
+++ b/arch/x86/kernel/cpu/common_64.c
@@ -37,6 +37,8 @@
 
 #include "cpu.h"
 
+static struct cpu_dev *this_cpu __cpuinitdata;
+
 /* We need valid kernel segments for data and code in long mode too
  * IRET will check the segment types  kkeil 2000/10/28
  * Also sysret mandates a special GDT layout
@@ -78,7 +80,6 @@ static struct cpu_dev __cpuinitdata default_cpu = {
 	.c_vendor = "Unknown",
 	.c_x86_vendor = X86_VENDOR_UNKNOWN,
 };
-static struct cpu_dev *this_cpu __cpuinitdata;
 
 int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
@@ -112,7 +113,7 @@ int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 
 void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
-	unsigned int n, dummy, ebx, ecx, edx;
+	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
 	n = c->extended_cpuid_level;
 
@@ -125,15 +126,17 @@ void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 		c->x86_tlbsize = 0;
 	}
 
-	if (n >= 0x80000006) {
-		cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
-		ecx = cpuid_ecx(0x80000006);
-		c->x86_cache_size = ecx >> 16;
-		c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
+	if (n < 0x80000006)	/* Some chips just has a large L1. */
+		return;
 
-		printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
-		c->x86_cache_size, ecx & 0xFF);
-	}
+	cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
+	l2size = ecx >> 16;
+	c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
+
+	c->x86_cache_size = l2size;
+
+	printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
+	       l2size, ecx & 0xFF);
 }
 
 void __cpuinit detect_ht(struct cpuinfo_x86 *c)
@@ -142,14 +145,13 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
 
-	cpuid(1, &eax, &ebx, &ecx, &edx);
-
-
 	if (!cpu_has(c, X86_FEATURE_HT))
 		return;
 	if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
 		goto out;
 
+	cpuid(1, &eax, &ebx, &ecx, &edx);
+
 	smp_num_siblings = (ebx & 0xff0000) >> 16;
 
 	if (smp_num_siblings == 1) {
@@ -175,6 +177,7 @@ void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 		c->cpu_core_id = phys_pkg_id(index_msb) &
 					       ((1 << core_bits) - 1);
 	}
+
 out:
 	if ((c->x86_max_cores * smp_num_siblings) > 1) {
 		printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
@@ -182,7 +185,6 @@ out:
 		printk(KERN_INFO "CPU: Processor Core ID: %d\n",
 		       c->cpu_core_id);
 	}
-
 #endif
 }
 
@@ -405,10 +407,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 	c->x86_model = c->x86_mask = 0;	/* So far unknown... */
 	c->x86_vendor_id[0] = '\0'; /* Unset */
 	c->x86_model_id[0] = '\0';  /* Unset */
-	c->x86_clflush_size = 64;
-	c->x86_cache_alignment = c->x86_clflush_size;
 	c->x86_max_cores = 1;
 	c->x86_coreid_bits = 0;
+	c->x86_clflush_size = 64;
+	c->x86_cache_alignment = c->x86_clflush_size;
 	memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
 	generic_identify(c);
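
Both copies of display_cacheinfo() now decode extended CPUID leaf 0x80000006 the same way: ECX bits 31:16 give the L2 size in KB and bits 7:0 the line size in bytes, while EBX carries the L2 TLB fields added into x86_tlbsize on 64-bit. A user-space sketch of that decode, assuming GCC/Clang's <cpuid.h> helper __get_cpuid() rather than any kernel API:

/* Sketch of the L2-cache decode done by display_cacheinfo(), in user space.
 * Field layout per CPUID leaf 0x80000006:
 *   ECX[31:16] = L2 size in KB, ECX[7:0] = L2 line size in bytes. */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* __get_cpuid() returns 0 if the leaf is out of range -- the same
	 * "some chips just have a large L1" case the kernel code skips. */
	if (!__get_cpuid(0x80000006, &eax, &ebx, &ecx, &edx)) {
		fprintf(stderr, "CPUID leaf 0x80000006 not supported\n");
		return 1;
	}

	printf("L2 Cache: %uK (%u bytes/line)\n", ecx >> 16, ecx & 0xff);
	return 0;
}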