diff options
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r-- | arch/x86/kernel/cpu/common.c | 38 |
1 file changed, 23 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index a9e3791ca09..e2962cc1e27 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -70,7 +70,7 @@ cpumask_t cpu_callin_map; | |||
70 | #endif /* CONFIG_X86_32 */ | 70 | #endif /* CONFIG_X86_32 */ |
71 | 71 | ||
72 | 72 | ||
73 | static struct cpu_dev *this_cpu __cpuinitdata; | 73 | static const struct cpu_dev *this_cpu __cpuinitdata; |
74 | 74 | ||
75 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | 75 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { |
76 | #ifdef CONFIG_X86_64 | 76 | #ifdef CONFIG_X86_64 |
@@ -284,9 +284,9 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) | |||
284 | */ | 284 | */ |
285 | 285 | ||
286 | /* Look up CPU names by table lookup. */ | 286 | /* Look up CPU names by table lookup. */ |
287 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | 287 | static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c) |
288 | { | 288 | { |
289 | struct cpu_model_info *info; | 289 | const struct cpu_model_info *info; |
290 | 290 | ||
291 | if (c->x86_model >= 16) | 291 | if (c->x86_model >= 16) |
292 | return NULL; /* Range check */ | 292 | return NULL; /* Range check */ |
@@ -333,7 +333,7 @@ void switch_to_new_gdt(int cpu) | |||
333 | load_percpu_segment(cpu); | 333 | load_percpu_segment(cpu); |
334 | } | 334 | } |
335 | 335 | ||
336 | static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | 336 | static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; |
337 | 337 | ||
338 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | 338 | static void __cpuinit default_init(struct cpuinfo_x86 *c) |
339 | { | 339 | { |
@@ -352,7 +352,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c) | |||
352 | #endif | 352 | #endif |
353 | } | 353 | } |
354 | 354 | ||
355 | static struct cpu_dev __cpuinitdata default_cpu = { | 355 | static const struct cpu_dev __cpuinitconst default_cpu = { |
356 | .c_init = default_init, | 356 | .c_init = default_init, |
357 | .c_vendor = "Unknown", | 357 | .c_vendor = "Unknown", |
358 | .c_x86_vendor = X86_VENDOR_UNKNOWN, | 358 | .c_x86_vendor = X86_VENDOR_UNKNOWN, |
@@ -574,13 +574,15 @@ static void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | |||
574 | } | 574 | } |
575 | } | 575 | } |
576 | 576 | ||
577 | #ifdef CONFIG_X86_64 | ||
578 | if (c->extended_cpuid_level >= 0x80000008) { | 577 | if (c->extended_cpuid_level >= 0x80000008) { |
579 | u32 eax = cpuid_eax(0x80000008); | 578 | u32 eax = cpuid_eax(0x80000008); |
580 | 579 | ||
581 | c->x86_virt_bits = (eax >> 8) & 0xff; | 580 | c->x86_virt_bits = (eax >> 8) & 0xff; |
582 | c->x86_phys_bits = eax & 0xff; | 581 | c->x86_phys_bits = eax & 0xff; |
583 | } | 582 | } |
583 | #ifdef CONFIG_X86_32 | ||
584 | else if (cpu_has(c, X86_FEATURE_PAE) || cpu_has(c, X86_FEATURE_PSE36)) | ||
585 | c->x86_phys_bits = 36; | ||
584 | #endif | 586 | #endif |
585 | 587 | ||
586 | if (c->extended_cpuid_level >= 0x80000007) | 588 | if (c->extended_cpuid_level >= 0x80000007) |
@@ -627,8 +629,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
627 | { | 629 | { |
628 | #ifdef CONFIG_X86_64 | 630 | #ifdef CONFIG_X86_64 |
629 | c->x86_clflush_size = 64; | 631 | c->x86_clflush_size = 64; |
632 | c->x86_phys_bits = 36; | ||
633 | c->x86_virt_bits = 48; | ||
630 | #else | 634 | #else |
631 | c->x86_clflush_size = 32; | 635 | c->x86_clflush_size = 32; |
636 | c->x86_phys_bits = 32; | ||
637 | c->x86_virt_bits = 32; | ||
632 | #endif | 638 | #endif |
633 | c->x86_cache_alignment = c->x86_clflush_size; | 639 | c->x86_cache_alignment = c->x86_clflush_size; |
634 | 640 | ||
@@ -659,12 +665,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
659 | 665 | ||
660 | void __init early_cpu_init(void) | 666 | void __init early_cpu_init(void) |
661 | { | 667 | { |
662 | struct cpu_dev **cdev; | 668 | const struct cpu_dev *const *cdev; |
663 | int count = 0; | 669 | int count = 0; |
664 | 670 | ||
665 | printk(KERN_INFO "KERNEL supported cpus:\n"); | 671 | printk(KERN_INFO "KERNEL supported cpus:\n"); |
666 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { | 672 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { |
667 | struct cpu_dev *cpudev = *cdev; | 673 | const struct cpu_dev *cpudev = *cdev; |
668 | unsigned int j; | 674 | unsigned int j; |
669 | 675 | ||
670 | if (count >= X86_VENDOR_NUM) | 676 | if (count >= X86_VENDOR_NUM) |
@@ -751,9 +757,13 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
751 | c->x86_coreid_bits = 0; | 757 | c->x86_coreid_bits = 0; |
752 | #ifdef CONFIG_X86_64 | 758 | #ifdef CONFIG_X86_64 |
753 | c->x86_clflush_size = 64; | 759 | c->x86_clflush_size = 64; |
760 | c->x86_phys_bits = 36; | ||
761 | c->x86_virt_bits = 48; | ||
754 | #else | 762 | #else |
755 | c->cpuid_level = -1; /* CPUID not detected */ | 763 | c->cpuid_level = -1; /* CPUID not detected */ |
756 | c->x86_clflush_size = 32; | 764 | c->x86_clflush_size = 32; |
765 | c->x86_phys_bits = 32; | ||
766 | c->x86_virt_bits = 32; | ||
757 | #endif | 767 | #endif |
758 | c->x86_cache_alignment = c->x86_clflush_size; | 768 | c->x86_cache_alignment = c->x86_clflush_size; |
759 | memset(&c->x86_capability, 0, sizeof c->x86_capability); | 769 | memset(&c->x86_capability, 0, sizeof c->x86_capability); |
@@ -793,7 +803,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
793 | 803 | ||
794 | /* If the model name is still unset, do table lookup. */ | 804 | /* If the model name is still unset, do table lookup. */ |
795 | if (!c->x86_model_id[0]) { | 805 | if (!c->x86_model_id[0]) { |
796 | char *p; | 806 | const char *p; |
797 | p = table_lookup_model(c); | 807 | p = table_lookup_model(c); |
798 | if (p) | 808 | if (p) |
799 | strcpy(c->x86_model_id, p); | 809 | strcpy(c->x86_model_id, p); |
@@ -872,7 +882,7 @@ struct msr_range { | |||
872 | unsigned max; | 882 | unsigned max; |
873 | }; | 883 | }; |
874 | 884 | ||
875 | static struct msr_range msr_range_array[] __cpuinitdata = { | 885 | static const struct msr_range msr_range_array[] __cpuinitconst = { |
876 | { 0x00000000, 0x00000418}, | 886 | { 0x00000000, 0x00000418}, |
877 | { 0xc0000000, 0xc000040b}, | 887 | { 0xc0000000, 0xc000040b}, |
878 | { 0xc0010000, 0xc0010142}, | 888 | { 0xc0010000, 0xc0010142}, |
@@ -921,7 +931,7 @@ __setup("noclflush", setup_noclflush); | |||
921 | 931 | ||
922 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | 932 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) |
923 | { | 933 | { |
924 | char *vendor = NULL; | 934 | const char *vendor = NULL; |
925 | 935 | ||
926 | if (c->x86_vendor < X86_VENDOR_NUM) { | 936 | if (c->x86_vendor < X86_VENDOR_NUM) { |
927 | vendor = this_cpu->c_vendor; | 937 | vendor = this_cpu->c_vendor; |
@@ -1139,8 +1149,7 @@ void __cpuinit cpu_init(void) | |||
1139 | 1149 | ||
1140 | atomic_inc(&init_mm.mm_count); | 1150 | atomic_inc(&init_mm.mm_count); |
1141 | me->active_mm = &init_mm; | 1151 | me->active_mm = &init_mm; |
1142 | if (me->mm) | 1152 | BUG_ON(me->mm); |
1143 | BUG(); | ||
1144 | enter_lazy_tlb(&init_mm, me); | 1153 | enter_lazy_tlb(&init_mm, me); |
1145 | 1154 | ||
1146 | load_sp0(t, ¤t->thread); | 1155 | load_sp0(t, ¤t->thread); |
@@ -1197,8 +1206,7 @@ void __cpuinit cpu_init(void) | |||
1197 | */ | 1206 | */ |
1198 | atomic_inc(&init_mm.mm_count); | 1207 | atomic_inc(&init_mm.mm_count); |
1199 | curr->active_mm = &init_mm; | 1208 | curr->active_mm = &init_mm; |
1200 | if (curr->mm) | 1209 | BUG_ON(curr->mm); |
1201 | BUG(); | ||
1202 | enter_lazy_tlb(&init_mm, curr); | 1210 | enter_lazy_tlb(&init_mm, curr); |
1203 | 1211 | ||
1204 | load_sp0(t, thread); | 1212 | load_sp0(t, thread); |