author     Chuck Ebbert <76306.1226@compuserve.com>   2006-03-23 05:59:33 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>      2006-03-23 10:38:04 -0500
commit     3bc9b76bede9b3c72088258c7e72eb823f3351d4
tree       9f1eed2e6ebc9e7a990178e62b55fb6ee3267d8f
parent     9a0b5817ad97bb718ab85322759d19a238712b47
[PATCH] i386: __devinit should be __cpuinit
Several places in arch/i386/kernel/cpu and kernel/cpu were using __devinit
when they should have been __cpuinit. Fixing that saves ~4K when
CONFIG_HOTPLUG && !CONFIG_HOTPLUG_CPU.
Noticed by Andrew Morton.
Signed-off-by: Chuck Ebbert <76306.1226@compuserve.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
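For context on where the size saving comes from, here is a simplified sketch of the 2.6-era include/linux/init.h definitions these annotations expand to (abridged; the real header also defines the __devexit/__cpuexit variants):

    #ifdef CONFIG_HOTPLUG
    #define __devinit                   /* kept resident whenever device hotplug is enabled */
    #define __devinitdata
    #else
    #define __devinit      __init       /* otherwise placed in init sections, freed after boot */
    #define __devinitdata  __initdata
    #endif

    #ifdef CONFIG_HOTPLUG_CPU
    #define __cpuinit                   /* kept resident only when CPU hotplug is enabled */
    #define __cpuinitdata
    #else
    #define __cpuinit      __init
    #define __cpuinitdata  __initdata
    #endif

With CONFIG_HOTPLUG=y and CONFIG_HOTPLUG_CPU=n, __devinit expands to nothing (the code stays resident) while __cpuinit expands to __init (the code is discarded once boot finishes). The routines changed below only run when a CPU is brought up, so __cpuinit is the correct annotation and the kernel can free them after boot, which is where the ~4K comes from.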
-rw-r--r--  arch/i386/kernel/cpu/common.c          | 32
-rw-r--r--  arch/i386/kernel/cpu/intel.c           | 12
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c |  2
3 files changed, 23 insertions(+), 23 deletions(-)
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index e6bd095ae108..f63dcfb16dad 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -25,9 +25,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
 DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
 
-static int cachesize_override __devinitdata = -1;
-static int disable_x86_fxsr __devinitdata = 0;
-static int disable_x86_serial_nr __devinitdata = 1;
+static int cachesize_override __cpuinitdata = -1;
+static int disable_x86_fxsr __cpuinitdata = 0;
+static int disable_x86_serial_nr __cpuinitdata = 1;
 
 struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
 
@@ -59,7 +59,7 @@ static int __init cachesize_setup(char *str)
 }
 __setup("cachesize=", cachesize_setup);
 
-int __devinit get_model_name(struct cpuinfo_x86 *c)
+int __cpuinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
@@ -89,7 +89,7 @@ int __devinit get_model_name(struct cpuinfo_x86 *c)
 }
 
 
-void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
+void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ecx, edx, l2size;
 
@@ -130,7 +130,7 @@ void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
 /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
 
 /* Look up CPU names by table lookup. */
-static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
+static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
 {
 	struct cpu_model_info *info;
 
@@ -151,7 +151,7 @@ static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
 }
 
 
-static void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
 	char *v = c->x86_vendor_id;
 	int i;
@@ -210,7 +210,7 @@ static inline int flag_is_changeable_p(u32 flag)
 
 
 /* Probe for the CPUID instruction */
-static int __devinit have_cpuid_p(void)
+static int __cpuinit have_cpuid_p(void)
 {
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
@@ -254,7 +254,7 @@ static void __init early_cpu_detect(void)
 	}
 }
 
-void __devinit generic_identify(struct cpuinfo_x86 * c)
+void __cpuinit generic_identify(struct cpuinfo_x86 * c)
 {
 	u32 tfms, xlvl;
 	int junk;
@@ -307,7 +307,7 @@ void __devinit generic_identify(struct cpuinfo_x86 * c)
 #endif
 }
 
-static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
 		/* Disable processor serial number */
@@ -335,7 +335,7 @@ __setup("serialnumber", x86_serial_nr_setup);
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
-void __devinit identify_cpu(struct cpuinfo_x86 *c)
+void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
@@ -453,7 +453,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
 }
 
 #ifdef CONFIG_X86_HT
-void __devinit detect_ht(struct cpuinfo_x86 *c)
+void __cpuinit detect_ht(struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 	int index_msb, core_bits;
@@ -500,7 +500,7 @@ void __devinit detect_ht(struct cpuinfo_x86 *c)
 }
 #endif
 
-void __devinit print_cpu_info(struct cpuinfo_x86 *c)
+void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
 	char *vendor = NULL;
 
@@ -523,7 +523,7 @@ void __devinit print_cpu_info(struct cpuinfo_x86 *c)
 	printk("\n");
 }
 
-cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;
+cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
 
 /* This is hacky. :)
  * We're emulating future behavior.
@@ -570,7 +570,7 @@ void __init early_cpu_init(void)
  * and IDT. We reload them nevertheless, this function acts as a
  * 'CPU state barrier', nothing should get across.
  */
-void __devinit cpu_init(void)
+void __cpuinit cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct tss_struct * t = &per_cpu(init_tss, cpu);
@@ -670,7 +670,7 @@ void __devinit cpu_init(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void __devinit cpu_uninit(void)
+void __cpuinit cpu_uninit(void)
 {
 	int cpu = raw_smp_processor_id();
 	cpu_clear(cpu, cpu_initialized);
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 8c0120186b9f..5386b29bb5a5 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -29,7 +29,7 @@ extern int trap_init_f00f_bug(void);
 struct movsl_mask movsl_mask __read_mostly;
 #endif
 
-void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
+void __cpuinit early_intel_workaround(struct cpuinfo_x86 *c)
 {
 	if (c->x86_vendor != X86_VENDOR_INTEL)
 		return;
@@ -44,7 +44,7 @@ void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
  * This is called before we do cpu ident work
  */
 
-int __devinit ppro_with_ram_bug(void)
+int __cpuinit ppro_with_ram_bug(void)
 {
 	/* Uses data from early_cpu_detect now */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -62,7 +62,7 @@ int __devinit ppro_with_ram_bug(void)
  * P4 Xeon errata 037 workaround.
  * Hardware prefetcher may cause stale data to be loaded into the cache.
  */
-static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
+static void __cpuinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
 
@@ -81,7 +81,7 @@ static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
 /*
  * find out the number of processor cores on the die
 */
-static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
+static int __cpuinit num_cpu_cores(struct cpuinfo_x86 *c)
 {
 	unsigned int eax, ebx, ecx, edx;
 
@@ -96,7 +96,7 @@ static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
 	return 1;
 }
 
-static void __devinit init_intel(struct cpuinfo_x86 *c)
+static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
 	char *p = NULL;
@@ -205,7 +205,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
 	return size;
 }
 
-static struct cpu_dev intel_cpu_dev __devinitdata = {
+static struct cpu_dev intel_cpu_dev __cpuinitdata = {
 	.c_vendor = "Intel",
 	.c_ident = { "GenuineIntel" },
 	.c_models = {
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index ffe58cee0c48..36c9b3706637 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -330,7 +330,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 		}
 	}
 }
-static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	int sibling;