diff options
author | Jan Beulich <jbeulich@novell.com> | 2009-03-12 08:08:49 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-03-12 08:13:07 -0400 |
commit | 02dde8b45c5460794b9052d7c12939fe3eb63c2c (patch) | |
tree | 5c24bd6411e3fb704c81694f8f63e8bd963848ad /arch/x86/kernel/cpu/common.c | |
parent | 821508d4ef7920283b960057903505fed609fd16 (diff) |
x86: move various CPU initialization objects into .cpuinit.rodata
Impact: debuggability and micro-optimization
Putting whatever is possible into the (final) .rodata section increases
the likelihood of catching memory corruption bugs early, and reduces
false cache line sharing.
Signed-off-by: Jan Beulich <jbeulich@novell.com>
LKML-Reference: <49B90961.76E4.0078.0@novell.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/common.c')
-rw-r--r-- | arch/x86/kernel/cpu/common.c | 20 |
1 file changed, 10 insertions, 10 deletions
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f8869978bbb7..54cbe7690f93 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -70,7 +70,7 @@ cpumask_t cpu_sibling_setup_map; | |||
70 | #endif /* CONFIG_X86_32 */ | 70 | #endif /* CONFIG_X86_32 */ |
71 | 71 | ||
72 | 72 | ||
73 | static struct cpu_dev *this_cpu __cpuinitdata; | 73 | static const struct cpu_dev *this_cpu __cpuinitdata; |
74 | 74 | ||
75 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | 75 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { |
76 | #ifdef CONFIG_X86_64 | 76 | #ifdef CONFIG_X86_64 |
@@ -274,9 +274,9 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) | |||
274 | */ | 274 | */ |
275 | 275 | ||
276 | /* Look up CPU names by table lookup. */ | 276 | /* Look up CPU names by table lookup. */ |
277 | static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c) | 277 | static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c) |
278 | { | 278 | { |
279 | struct cpu_model_info *info; | 279 | const struct cpu_model_info *info; |
280 | 280 | ||
281 | if (c->x86_model >= 16) | 281 | if (c->x86_model >= 16) |
282 | return NULL; /* Range check */ | 282 | return NULL; /* Range check */ |
@@ -321,7 +321,7 @@ void switch_to_new_gdt(int cpu) | |||
321 | load_percpu_segment(cpu); | 321 | load_percpu_segment(cpu); |
322 | } | 322 | } |
323 | 323 | ||
324 | static struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; | 324 | static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; |
325 | 325 | ||
326 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | 326 | static void __cpuinit default_init(struct cpuinfo_x86 *c) |
327 | { | 327 | { |
@@ -340,7 +340,7 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c) | |||
340 | #endif | 340 | #endif |
341 | } | 341 | } |
342 | 342 | ||
343 | static struct cpu_dev __cpuinitdata default_cpu = { | 343 | static const struct cpu_dev __cpuinitconst default_cpu = { |
344 | .c_init = default_init, | 344 | .c_init = default_init, |
345 | .c_vendor = "Unknown", | 345 | .c_vendor = "Unknown", |
346 | .c_x86_vendor = X86_VENDOR_UNKNOWN, | 346 | .c_x86_vendor = X86_VENDOR_UNKNOWN, |
@@ -634,12 +634,12 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) | |||
634 | 634 | ||
635 | void __init early_cpu_init(void) | 635 | void __init early_cpu_init(void) |
636 | { | 636 | { |
637 | struct cpu_dev **cdev; | 637 | const struct cpu_dev *const *cdev; |
638 | int count = 0; | 638 | int count = 0; |
639 | 639 | ||
640 | printk("KERNEL supported cpus:\n"); | 640 | printk("KERNEL supported cpus:\n"); |
641 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { | 641 | for (cdev = __x86_cpu_dev_start; cdev < __x86_cpu_dev_end; cdev++) { |
642 | struct cpu_dev *cpudev = *cdev; | 642 | const struct cpu_dev *cpudev = *cdev; |
643 | unsigned int j; | 643 | unsigned int j; |
644 | 644 | ||
645 | if (count >= X86_VENDOR_NUM) | 645 | if (count >= X86_VENDOR_NUM) |
@@ -768,7 +768,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | |||
768 | 768 | ||
769 | /* If the model name is still unset, do table lookup. */ | 769 | /* If the model name is still unset, do table lookup. */ |
770 | if (!c->x86_model_id[0]) { | 770 | if (!c->x86_model_id[0]) { |
771 | char *p; | 771 | const char *p; |
772 | p = table_lookup_model(c); | 772 | p = table_lookup_model(c); |
773 | if (p) | 773 | if (p) |
774 | strcpy(c->x86_model_id, p); | 774 | strcpy(c->x86_model_id, p); |
@@ -847,7 +847,7 @@ struct msr_range { | |||
847 | unsigned max; | 847 | unsigned max; |
848 | }; | 848 | }; |
849 | 849 | ||
850 | static struct msr_range msr_range_array[] __cpuinitdata = { | 850 | static const struct msr_range msr_range_array[] __cpuinitconst = { |
851 | { 0x00000000, 0x00000418}, | 851 | { 0x00000000, 0x00000418}, |
852 | { 0xc0000000, 0xc000040b}, | 852 | { 0xc0000000, 0xc000040b}, |
853 | { 0xc0010000, 0xc0010142}, | 853 | { 0xc0010000, 0xc0010142}, |
@@ -894,7 +894,7 @@ __setup("noclflush", setup_noclflush); | |||
894 | 894 | ||
895 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | 895 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) |
896 | { | 896 | { |
897 | char *vendor = NULL; | 897 | const char *vendor = NULL; |
898 | 898 | ||
899 | if (c->x86_vendor < X86_VENDOR_NUM) | 899 | if (c->x86_vendor < X86_VENDOR_NUM) |
900 | vendor = this_cpu->c_vendor; | 900 | vendor = this_cpu->c_vendor; |