diff options
author | Paul Gortmaker <paul.gortmaker@windriver.com> | 2013-06-18 18:23:59 -0400 |
---|---|---|
committer | Paul Gortmaker <paul.gortmaker@windriver.com> | 2013-07-14 19:36:56 -0400 |
commit | 148f9bb87745ed45f7a11b2cbd3bc0f017d5d257 (patch) | |
tree | 88a21d992eae94a05cc30ddbc2c71465701ec3aa /arch/x86 | |
parent | 70e2a7bf23a0c412b908ba260e790a4f51c9f2b0 (diff) |
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. For example, the fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that cpuinit should go
the way of devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c)
and are flagged as __cpuinit -- so if we remove the __cpuinit from
arch specific callers, we will also get section mismatch warnings.
As an intermediate step, we intend to turn the linux/init.h cpuinit
content into no-ops as early as possible, since that will get rid
of these warnings. In any case, they are temporary and harmless.
This removes all the arch/x86 uses of the __cpuinit macros from
all C files. x86 only had the one __CPUINIT used in assembly files,
and it wasn't paired off with a .previous or a __FINIT, so we can
delete it directly w/o any corresponding additional change there.
[1] https://lkml.org/lkml/2013/5/20/589
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Diffstat (limited to 'arch/x86')
71 files changed, 345 insertions, 356 deletions
diff --git a/arch/x86/include/asm/cpu.h b/arch/x86/include/asm/cpu.h index 5f9a1243190e..d2b12988d2ed 100644 --- a/arch/x86/include/asm/cpu.h +++ b/arch/x86/include/asm/cpu.h | |||
@@ -28,7 +28,7 @@ struct x86_cpu { | |||
28 | #ifdef CONFIG_HOTPLUG_CPU | 28 | #ifdef CONFIG_HOTPLUG_CPU |
29 | extern int arch_register_cpu(int num); | 29 | extern int arch_register_cpu(int num); |
30 | extern void arch_unregister_cpu(int); | 30 | extern void arch_unregister_cpu(int); |
31 | extern void __cpuinit start_cpu0(void); | 31 | extern void start_cpu0(void); |
32 | #ifdef CONFIG_DEBUG_HOTPLUG_CPU0 | 32 | #ifdef CONFIG_DEBUG_HOTPLUG_CPU0 |
33 | extern int _debug_hotplug_cpu(int cpu, int action); | 33 | extern int _debug_hotplug_cpu(int cpu, int action); |
34 | #endif | 34 | #endif |
diff --git a/arch/x86/include/asm/microcode.h b/arch/x86/include/asm/microcode.h index 6bc3985ee473..f98bd6625318 100644 --- a/arch/x86/include/asm/microcode.h +++ b/arch/x86/include/asm/microcode.h | |||
@@ -60,11 +60,11 @@ static inline void __exit exit_amd_microcode(void) {} | |||
60 | #ifdef CONFIG_MICROCODE_EARLY | 60 | #ifdef CONFIG_MICROCODE_EARLY |
61 | #define MAX_UCODE_COUNT 128 | 61 | #define MAX_UCODE_COUNT 128 |
62 | extern void __init load_ucode_bsp(void); | 62 | extern void __init load_ucode_bsp(void); |
63 | extern void __cpuinit load_ucode_ap(void); | 63 | extern void load_ucode_ap(void); |
64 | extern int __init save_microcode_in_initrd(void); | 64 | extern int __init save_microcode_in_initrd(void); |
65 | #else | 65 | #else |
66 | static inline void __init load_ucode_bsp(void) {} | 66 | static inline void __init load_ucode_bsp(void) {} |
67 | static inline void __cpuinit load_ucode_ap(void) {} | 67 | static inline void load_ucode_ap(void) {} |
68 | static inline int __init save_microcode_in_initrd(void) | 68 | static inline int __init save_microcode_in_initrd(void) |
69 | { | 69 | { |
70 | return 0; | 70 | return 0; |
diff --git a/arch/x86/include/asm/microcode_amd.h b/arch/x86/include/asm/microcode_amd.h index c6b043f40271..50e5c58ced23 100644 --- a/arch/x86/include/asm/microcode_amd.h +++ b/arch/x86/include/asm/microcode_amd.h | |||
@@ -67,11 +67,11 @@ extern enum ucode_state load_microcode_amd(int cpu, const u8 *data, size_t size) | |||
67 | extern u8 amd_bsp_mpb[MPB_MAX_SIZE]; | 67 | extern u8 amd_bsp_mpb[MPB_MAX_SIZE]; |
68 | #endif | 68 | #endif |
69 | extern void __init load_ucode_amd_bsp(void); | 69 | extern void __init load_ucode_amd_bsp(void); |
70 | extern void __cpuinit load_ucode_amd_ap(void); | 70 | extern void load_ucode_amd_ap(void); |
71 | extern int __init save_microcode_in_initrd_amd(void); | 71 | extern int __init save_microcode_in_initrd_amd(void); |
72 | #else | 72 | #else |
73 | static inline void __init load_ucode_amd_bsp(void) {} | 73 | static inline void __init load_ucode_amd_bsp(void) {} |
74 | static inline void __cpuinit load_ucode_amd_ap(void) {} | 74 | static inline void load_ucode_amd_ap(void) {} |
75 | static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; } | 75 | static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; } |
76 | #endif | 76 | #endif |
77 | 77 | ||
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h index 87a085333cbf..9067166409bf 100644 --- a/arch/x86/include/asm/microcode_intel.h +++ b/arch/x86/include/asm/microcode_intel.h | |||
@@ -65,12 +65,12 @@ update_match_revision(struct microcode_header_intel *mc_header, int rev); | |||
65 | 65 | ||
66 | #ifdef CONFIG_MICROCODE_INTEL_EARLY | 66 | #ifdef CONFIG_MICROCODE_INTEL_EARLY |
67 | extern void __init load_ucode_intel_bsp(void); | 67 | extern void __init load_ucode_intel_bsp(void); |
68 | extern void __cpuinit load_ucode_intel_ap(void); | 68 | extern void load_ucode_intel_ap(void); |
69 | extern void show_ucode_info_early(void); | 69 | extern void show_ucode_info_early(void); |
70 | extern int __init save_microcode_in_initrd_intel(void); | 70 | extern int __init save_microcode_in_initrd_intel(void); |
71 | #else | 71 | #else |
72 | static inline __init void load_ucode_intel_bsp(void) {} | 72 | static inline __init void load_ucode_intel_bsp(void) {} |
73 | static inline __cpuinit void load_ucode_intel_ap(void) {} | 73 | static inline void load_ucode_intel_ap(void) {} |
74 | static inline void show_ucode_info_early(void) {} | 74 | static inline void show_ucode_info_early(void) {} |
75 | static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; } | 75 | static inline int __init save_microcode_in_initrd_intel(void) { return -EINVAL; } |
76 | #endif | 76 | #endif |
diff --git a/arch/x86/include/asm/mmconfig.h b/arch/x86/include/asm/mmconfig.h index 9b119da1d105..04a3fed22cfe 100644 --- a/arch/x86/include/asm/mmconfig.h +++ b/arch/x86/include/asm/mmconfig.h | |||
@@ -2,8 +2,8 @@ | |||
2 | #define _ASM_X86_MMCONFIG_H | 2 | #define _ASM_X86_MMCONFIG_H |
3 | 3 | ||
4 | #ifdef CONFIG_PCI_MMCONFIG | 4 | #ifdef CONFIG_PCI_MMCONFIG |
5 | extern void __cpuinit fam10h_check_enable_mmcfg(void); | 5 | extern void fam10h_check_enable_mmcfg(void); |
6 | extern void __cpuinit check_enable_amd_mmconf_dmi(void); | 6 | extern void check_enable_amd_mmconf_dmi(void); |
7 | #else | 7 | #else |
8 | static inline void fam10h_check_enable_mmcfg(void) { } | 8 | static inline void fam10h_check_enable_mmcfg(void) { } |
9 | static inline void check_enable_amd_mmconf_dmi(void) { } | 9 | static inline void check_enable_amd_mmconf_dmi(void) { } |
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h index 3e2f42a4b872..626cf70082d7 100644 --- a/arch/x86/include/asm/mpspec.h +++ b/arch/x86/include/asm/mpspec.h | |||
@@ -94,7 +94,7 @@ static inline void early_reserve_e820_mpc_new(void) { } | |||
94 | #define default_get_smp_config x86_init_uint_noop | 94 | #define default_get_smp_config x86_init_uint_noop |
95 | #endif | 95 | #endif |
96 | 96 | ||
97 | void __cpuinit generic_processor_info(int apicid, int version); | 97 | void generic_processor_info(int apicid, int version); |
98 | #ifdef CONFIG_ACPI | 98 | #ifdef CONFIG_ACPI |
99 | extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); | 99 | extern void mp_register_ioapic(int id, u32 address, u32 gsi_base); |
100 | extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, | 100 | extern void mp_override_legacy_irq(u8 bus_irq, u8 polarity, u8 trigger, |
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h index 1b99ee5c9f00..4064acae625d 100644 --- a/arch/x86/include/asm/numa.h +++ b/arch/x86/include/asm/numa.h | |||
@@ -39,7 +39,7 @@ static inline void set_apicid_to_node(int apicid, s16 node) | |||
39 | __apicid_to_node[apicid] = node; | 39 | __apicid_to_node[apicid] = node; |
40 | } | 40 | } |
41 | 41 | ||
42 | extern int __cpuinit numa_cpu_node(int cpu); | 42 | extern int numa_cpu_node(int cpu); |
43 | 43 | ||
44 | #else /* CONFIG_NUMA */ | 44 | #else /* CONFIG_NUMA */ |
45 | static inline void set_apicid_to_node(int apicid, s16 node) | 45 | static inline void set_apicid_to_node(int apicid, s16 node) |
@@ -60,8 +60,8 @@ static inline int numa_cpu_node(int cpu) | |||
60 | extern void numa_set_node(int cpu, int node); | 60 | extern void numa_set_node(int cpu, int node); |
61 | extern void numa_clear_node(int cpu); | 61 | extern void numa_clear_node(int cpu); |
62 | extern void __init init_cpu_to_node(void); | 62 | extern void __init init_cpu_to_node(void); |
63 | extern void __cpuinit numa_add_cpu(int cpu); | 63 | extern void numa_add_cpu(int cpu); |
64 | extern void __cpuinit numa_remove_cpu(int cpu); | 64 | extern void numa_remove_cpu(int cpu); |
65 | #else /* CONFIG_NUMA */ | 65 | #else /* CONFIG_NUMA */ |
66 | static inline void numa_set_node(int cpu, int node) { } | 66 | static inline void numa_set_node(int cpu, int node) { } |
67 | static inline void numa_clear_node(int cpu) { } | 67 | static inline void numa_clear_node(int cpu) { } |
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 29937c4f6ff8..24cf5aefb704 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h | |||
@@ -164,7 +164,7 @@ extern const struct seq_operations cpuinfo_op; | |||
164 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) | 164 | #define cache_line_size() (boot_cpu_data.x86_cache_alignment) |
165 | 165 | ||
166 | extern void cpu_detect(struct cpuinfo_x86 *c); | 166 | extern void cpu_detect(struct cpuinfo_x86 *c); |
167 | extern void __cpuinit fpu_detect(struct cpuinfo_x86 *c); | 167 | extern void fpu_detect(struct cpuinfo_x86 *c); |
168 | 168 | ||
169 | extern void early_cpu_init(void); | 169 | extern void early_cpu_init(void); |
170 | extern void identify_boot_cpu(void); | 170 | extern void identify_boot_cpu(void); |
diff --git a/arch/x86/include/asm/prom.h b/arch/x86/include/asm/prom.h index 60bef663609a..bade6ac3b14f 100644 --- a/arch/x86/include/asm/prom.h +++ b/arch/x86/include/asm/prom.h | |||
@@ -27,7 +27,7 @@ extern int of_ioapic; | |||
27 | extern u64 initial_dtb; | 27 | extern u64 initial_dtb; |
28 | extern void add_dtb(u64 data); | 28 | extern void add_dtb(u64 data); |
29 | extern void x86_add_irq_domains(void); | 29 | extern void x86_add_irq_domains(void); |
30 | void __cpuinit x86_of_pci_init(void); | 30 | void x86_of_pci_init(void); |
31 | void x86_dtb_init(void); | 31 | void x86_dtb_init(void); |
32 | #else | 32 | #else |
33 | static inline void add_dtb(u64 data) { } | 33 | static inline void add_dtb(u64 data) { } |
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h index b073aaea747c..4137890e88e3 100644 --- a/arch/x86/include/asm/smp.h +++ b/arch/x86/include/asm/smp.h | |||
@@ -179,7 +179,7 @@ static inline int wbinvd_on_all_cpus(void) | |||
179 | } | 179 | } |
180 | #endif /* CONFIG_SMP */ | 180 | #endif /* CONFIG_SMP */ |
181 | 181 | ||
182 | extern unsigned disabled_cpus __cpuinitdata; | 182 | extern unsigned disabled_cpus; |
183 | 183 | ||
184 | #ifdef CONFIG_X86_32_SMP | 184 | #ifdef CONFIG_X86_32_SMP |
185 | /* | 185 | /* |
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index d81a972dd506..2627a81253ee 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c | |||
@@ -195,7 +195,7 @@ static int __init acpi_parse_madt(struct acpi_table_header *table) | |||
195 | return 0; | 195 | return 0; |
196 | } | 196 | } |
197 | 197 | ||
198 | static void __cpuinit acpi_register_lapic(int id, u8 enabled) | 198 | static void acpi_register_lapic(int id, u8 enabled) |
199 | { | 199 | { |
200 | unsigned int ver = 0; | 200 | unsigned int ver = 0; |
201 | 201 | ||
@@ -607,7 +607,7 @@ void __init acpi_set_irq_model_ioapic(void) | |||
607 | #ifdef CONFIG_ACPI_HOTPLUG_CPU | 607 | #ifdef CONFIG_ACPI_HOTPLUG_CPU |
608 | #include <acpi/processor.h> | 608 | #include <acpi/processor.h> |
609 | 609 | ||
610 | static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) | 610 | static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) |
611 | { | 611 | { |
612 | #ifdef CONFIG_ACPI_NUMA | 612 | #ifdef CONFIG_ACPI_NUMA |
613 | int nid; | 613 | int nid; |
@@ -620,7 +620,7 @@ static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) | |||
620 | #endif | 620 | #endif |
621 | } | 621 | } |
622 | 622 | ||
623 | static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu) | 623 | static int _acpi_map_lsapic(acpi_handle handle, int *pcpu) |
624 | { | 624 | { |
625 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; | 625 | struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; |
626 | union acpi_object *obj; | 626 | union acpi_object *obj; |
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 99663b59123a..eca89c53a7f5 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c | |||
@@ -58,7 +58,7 @@ | |||
58 | 58 | ||
59 | unsigned int num_processors; | 59 | unsigned int num_processors; |
60 | 60 | ||
61 | unsigned disabled_cpus __cpuinitdata; | 61 | unsigned disabled_cpus; |
62 | 62 | ||
63 | /* Processor that is doing the boot up */ | 63 | /* Processor that is doing the boot up */ |
64 | unsigned int boot_cpu_physical_apicid = -1U; | 64 | unsigned int boot_cpu_physical_apicid = -1U; |
@@ -544,7 +544,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events); | |||
544 | * Setup the local APIC timer for this CPU. Copy the initialized values | 544 | * Setup the local APIC timer for this CPU. Copy the initialized values |
545 | * of the boot CPU and register the clock event in the framework. | 545 | * of the boot CPU and register the clock event in the framework. |
546 | */ | 546 | */ |
547 | static void __cpuinit setup_APIC_timer(void) | 547 | static void setup_APIC_timer(void) |
548 | { | 548 | { |
549 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); | 549 | struct clock_event_device *levt = &__get_cpu_var(lapic_events); |
550 | 550 | ||
@@ -866,7 +866,7 @@ void __init setup_boot_APIC_clock(void) | |||
866 | setup_APIC_timer(); | 866 | setup_APIC_timer(); |
867 | } | 867 | } |
868 | 868 | ||
869 | void __cpuinit setup_secondary_APIC_clock(void) | 869 | void setup_secondary_APIC_clock(void) |
870 | { | 870 | { |
871 | setup_APIC_timer(); | 871 | setup_APIC_timer(); |
872 | } | 872 | } |
@@ -1229,7 +1229,7 @@ void __init init_bsp_APIC(void) | |||
1229 | apic_write(APIC_LVT1, value); | 1229 | apic_write(APIC_LVT1, value); |
1230 | } | 1230 | } |
1231 | 1231 | ||
1232 | static void __cpuinit lapic_setup_esr(void) | 1232 | static void lapic_setup_esr(void) |
1233 | { | 1233 | { |
1234 | unsigned int oldvalue, value, maxlvt; | 1234 | unsigned int oldvalue, value, maxlvt; |
1235 | 1235 | ||
@@ -1276,7 +1276,7 @@ static void __cpuinit lapic_setup_esr(void) | |||
1276 | * Used to setup local APIC while initializing BSP or bringin up APs. | 1276 | * Used to setup local APIC while initializing BSP or bringin up APs. |
1277 | * Always called with preemption disabled. | 1277 | * Always called with preemption disabled. |
1278 | */ | 1278 | */ |
1279 | void __cpuinit setup_local_APIC(void) | 1279 | void setup_local_APIC(void) |
1280 | { | 1280 | { |
1281 | int cpu = smp_processor_id(); | 1281 | int cpu = smp_processor_id(); |
1282 | unsigned int value, queued; | 1282 | unsigned int value, queued; |
@@ -1471,7 +1471,7 @@ void __cpuinit setup_local_APIC(void) | |||
1471 | #endif | 1471 | #endif |
1472 | } | 1472 | } |
1473 | 1473 | ||
1474 | void __cpuinit end_local_APIC_setup(void) | 1474 | void end_local_APIC_setup(void) |
1475 | { | 1475 | { |
1476 | lapic_setup_esr(); | 1476 | lapic_setup_esr(); |
1477 | 1477 | ||
@@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup) | |||
2107 | apic_write(APIC_LVT1, value); | 2107 | apic_write(APIC_LVT1, value); |
2108 | } | 2108 | } |
2109 | 2109 | ||
2110 | void __cpuinit generic_processor_info(int apicid, int version) | 2110 | void generic_processor_info(int apicid, int version) |
2111 | { | 2111 | { |
2112 | int cpu, max = nr_cpu_ids; | 2112 | int cpu, max = nr_cpu_ids; |
2113 | bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, | 2113 | bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, |
@@ -2377,7 +2377,7 @@ static struct syscore_ops lapic_syscore_ops = { | |||
2377 | .suspend = lapic_suspend, | 2377 | .suspend = lapic_suspend, |
2378 | }; | 2378 | }; |
2379 | 2379 | ||
2380 | static void __cpuinit apic_pm_activate(void) | 2380 | static void apic_pm_activate(void) |
2381 | { | 2381 | { |
2382 | apic_pm_state.active = 1; | 2382 | apic_pm_state.active = 1; |
2383 | } | 2383 | } |
@@ -2402,7 +2402,7 @@ static void apic_pm_activate(void) { } | |||
2402 | 2402 | ||
2403 | #ifdef CONFIG_X86_64 | 2403 | #ifdef CONFIG_X86_64 |
2404 | 2404 | ||
2405 | static int __cpuinit apic_cluster_num(void) | 2405 | static int apic_cluster_num(void) |
2406 | { | 2406 | { |
2407 | int i, clusters, zeros; | 2407 | int i, clusters, zeros; |
2408 | unsigned id; | 2408 | unsigned id; |
@@ -2447,10 +2447,10 @@ static int __cpuinit apic_cluster_num(void) | |||
2447 | return clusters; | 2447 | return clusters; |
2448 | } | 2448 | } |
2449 | 2449 | ||
2450 | static int __cpuinitdata multi_checked; | 2450 | static int multi_checked; |
2451 | static int __cpuinitdata multi; | 2451 | static int multi; |
2452 | 2452 | ||
2453 | static int __cpuinit set_multi(const struct dmi_system_id *d) | 2453 | static int set_multi(const struct dmi_system_id *d) |
2454 | { | 2454 | { |
2455 | if (multi) | 2455 | if (multi) |
2456 | return 0; | 2456 | return 0; |
@@ -2459,7 +2459,7 @@ static int __cpuinit set_multi(const struct dmi_system_id *d) | |||
2459 | return 0; | 2459 | return 0; |
2460 | } | 2460 | } |
2461 | 2461 | ||
2462 | static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = { | 2462 | static const struct dmi_system_id multi_dmi_table[] = { |
2463 | { | 2463 | { |
2464 | .callback = set_multi, | 2464 | .callback = set_multi, |
2465 | .ident = "IBM System Summit2", | 2465 | .ident = "IBM System Summit2", |
@@ -2471,7 +2471,7 @@ static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = { | |||
2471 | {} | 2471 | {} |
2472 | }; | 2472 | }; |
2473 | 2473 | ||
2474 | static void __cpuinit dmi_check_multi(void) | 2474 | static void dmi_check_multi(void) |
2475 | { | 2475 | { |
2476 | if (multi_checked) | 2476 | if (multi_checked) |
2477 | return; | 2477 | return; |
@@ -2488,7 +2488,7 @@ static void __cpuinit dmi_check_multi(void) | |||
2488 | * multi-chassis. | 2488 | * multi-chassis. |
2489 | * Use DMI to check them | 2489 | * Use DMI to check them |
2490 | */ | 2490 | */ |
2491 | __cpuinit int apic_is_clustered_box(void) | 2491 | int apic_is_clustered_box(void) |
2492 | { | 2492 | { |
2493 | dmi_check_multi(); | 2493 | dmi_check_multi(); |
2494 | if (multi) | 2494 | if (multi) |
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c index 9a9110918ca7..3e67f9e3d7ef 100644 --- a/arch/x86/kernel/apic/apic_numachip.c +++ b/arch/x86/kernel/apic/apic_numachip.c | |||
@@ -74,7 +74,7 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb) | |||
74 | return initial_apic_id >> index_msb; | 74 | return initial_apic_id >> index_msb; |
75 | } | 75 | } |
76 | 76 | ||
77 | static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip) | 77 | static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip) |
78 | { | 78 | { |
79 | union numachip_csr_g3_ext_irq_gen int_gen; | 79 | union numachip_csr_g3_ext_irq_gen int_gen; |
80 | 80 | ||
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c index 0874799a98c6..c55224731b2d 100644 --- a/arch/x86/kernel/apic/es7000_32.c +++ b/arch/x86/kernel/apic/es7000_32.c | |||
@@ -130,7 +130,7 @@ int es7000_plat; | |||
130 | */ | 130 | */ |
131 | 131 | ||
132 | 132 | ||
133 | static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) | 133 | static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip) |
134 | { | 134 | { |
135 | unsigned long vect = 0, psaival = 0; | 135 | unsigned long vect = 0, psaival = 0; |
136 | 136 | ||
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c index d661ee95cabf..1e42e8f305ee 100644 --- a/arch/x86/kernel/apic/numaq_32.c +++ b/arch/x86/kernel/apic/numaq_32.c | |||
@@ -105,7 +105,7 @@ static void __init smp_dump_qct(void) | |||
105 | } | 105 | } |
106 | } | 106 | } |
107 | 107 | ||
108 | void __cpuinit numaq_tsc_disable(void) | 108 | void numaq_tsc_disable(void) |
109 | { | 109 | { |
110 | if (!found_numaq) | 110 | if (!found_numaq) |
111 | return; | 111 | return; |
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c index c88baa4ff0e5..140e29db478d 100644 --- a/arch/x86/kernel/apic/x2apic_cluster.c +++ b/arch/x86/kernel/apic/x2apic_cluster.c | |||
@@ -148,7 +148,7 @@ static void init_x2apic_ldr(void) | |||
148 | /* | 148 | /* |
149 | * At CPU state changes, update the x2apic cluster sibling info. | 149 | * At CPU state changes, update the x2apic cluster sibling info. |
150 | */ | 150 | */ |
151 | static int __cpuinit | 151 | static int |
152 | update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) | 152 | update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu) |
153 | { | 153 | { |
154 | unsigned int this_cpu = (unsigned long)hcpu; | 154 | unsigned int this_cpu = (unsigned long)hcpu; |
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c index 63092afb142e..1191ac1c9d25 100644 --- a/arch/x86/kernel/apic/x2apic_uv_x.c +++ b/arch/x86/kernel/apic/x2apic_uv_x.c | |||
@@ -209,7 +209,7 @@ EXPORT_SYMBOL_GPL(uv_possible_blades); | |||
209 | unsigned long sn_rtc_cycles_per_second; | 209 | unsigned long sn_rtc_cycles_per_second; |
210 | EXPORT_SYMBOL(sn_rtc_cycles_per_second); | 210 | EXPORT_SYMBOL(sn_rtc_cycles_per_second); |
211 | 211 | ||
212 | static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) | 212 | static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip) |
213 | { | 213 | { |
214 | #ifdef CONFIG_SMP | 214 | #ifdef CONFIG_SMP |
215 | unsigned long val; | 215 | unsigned long val; |
@@ -416,7 +416,7 @@ static struct apic __refdata apic_x2apic_uv_x = { | |||
416 | .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, | 416 | .safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle, |
417 | }; | 417 | }; |
418 | 418 | ||
419 | static __cpuinit void set_x2apic_extra_bits(int pnode) | 419 | static void set_x2apic_extra_bits(int pnode) |
420 | { | 420 | { |
421 | __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift); | 421 | __this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift); |
422 | } | 422 | } |
@@ -735,7 +735,7 @@ static void uv_heartbeat(unsigned long ignored) | |||
735 | mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL); | 735 | mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL); |
736 | } | 736 | } |
737 | 737 | ||
738 | static void __cpuinit uv_heartbeat_enable(int cpu) | 738 | static void uv_heartbeat_enable(int cpu) |
739 | { | 739 | { |
740 | while (!uv_cpu_hub_info(cpu)->scir.enabled) { | 740 | while (!uv_cpu_hub_info(cpu)->scir.enabled) { |
741 | struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer; | 741 | struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer; |
@@ -752,7 +752,7 @@ static void __cpuinit uv_heartbeat_enable(int cpu) | |||
752 | } | 752 | } |
753 | 753 | ||
754 | #ifdef CONFIG_HOTPLUG_CPU | 754 | #ifdef CONFIG_HOTPLUG_CPU |
755 | static void __cpuinit uv_heartbeat_disable(int cpu) | 755 | static void uv_heartbeat_disable(int cpu) |
756 | { | 756 | { |
757 | if (uv_cpu_hub_info(cpu)->scir.enabled) { | 757 | if (uv_cpu_hub_info(cpu)->scir.enabled) { |
758 | uv_cpu_hub_info(cpu)->scir.enabled = 0; | 758 | uv_cpu_hub_info(cpu)->scir.enabled = 0; |
@@ -764,8 +764,8 @@ static void __cpuinit uv_heartbeat_disable(int cpu) | |||
764 | /* | 764 | /* |
765 | * cpu hotplug notifier | 765 | * cpu hotplug notifier |
766 | */ | 766 | */ |
767 | static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self, | 767 | static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action, |
768 | unsigned long action, void *hcpu) | 768 | void *hcpu) |
769 | { | 769 | { |
770 | long cpu = (long)hcpu; | 770 | long cpu = (long)hcpu; |
771 | 771 | ||
@@ -835,7 +835,7 @@ int uv_set_vga_state(struct pci_dev *pdev, bool decode, | |||
835 | * Called on each cpu to initialize the per_cpu UV data area. | 835 | * Called on each cpu to initialize the per_cpu UV data area. |
836 | * FIXME: hotplug not supported yet | 836 | * FIXME: hotplug not supported yet |
837 | */ | 837 | */ |
838 | void __cpuinit uv_cpu_init(void) | 838 | void uv_cpu_init(void) |
839 | { | 839 | { |
840 | /* CPU 0 initilization will be done via uv_system_init. */ | 840 | /* CPU 0 initilization will be done via uv_system_init. */ |
841 | if (!uv_blade_info) | 841 | if (!uv_blade_info) |
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index c587a8757227..f654ecefea5b 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c | |||
@@ -69,7 +69,7 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val) | |||
69 | extern void vide(void); | 69 | extern void vide(void); |
70 | __asm__(".align 4\nvide: ret"); | 70 | __asm__(".align 4\nvide: ret"); |
71 | 71 | ||
72 | static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c) | 72 | static void init_amd_k5(struct cpuinfo_x86 *c) |
73 | { | 73 | { |
74 | /* | 74 | /* |
75 | * General Systems BIOSen alias the cpu frequency registers | 75 | * General Systems BIOSen alias the cpu frequency registers |
@@ -87,7 +87,7 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c) | |||
87 | } | 87 | } |
88 | 88 | ||
89 | 89 | ||
90 | static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) | 90 | static void init_amd_k6(struct cpuinfo_x86 *c) |
91 | { | 91 | { |
92 | u32 l, h; | 92 | u32 l, h; |
93 | int mbytes = get_num_physpages() >> (20-PAGE_SHIFT); | 93 | int mbytes = get_num_physpages() >> (20-PAGE_SHIFT); |
@@ -179,7 +179,7 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c) | |||
179 | } | 179 | } |
180 | } | 180 | } |
181 | 181 | ||
182 | static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) | 182 | static void amd_k7_smp_check(struct cpuinfo_x86 *c) |
183 | { | 183 | { |
184 | /* calling is from identify_secondary_cpu() ? */ | 184 | /* calling is from identify_secondary_cpu() ? */ |
185 | if (!c->cpu_index) | 185 | if (!c->cpu_index) |
@@ -222,7 +222,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c) | |||
222 | add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE); | 222 | add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE); |
223 | } | 223 | } |
224 | 224 | ||
225 | static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) | 225 | static void init_amd_k7(struct cpuinfo_x86 *c) |
226 | { | 226 | { |
227 | u32 l, h; | 227 | u32 l, h; |
228 | 228 | ||
@@ -267,7 +267,7 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c) | |||
267 | * To workaround broken NUMA config. Read the comment in | 267 | * To workaround broken NUMA config. Read the comment in |
268 | * srat_detect_node(). | 268 | * srat_detect_node(). |
269 | */ | 269 | */ |
270 | static int __cpuinit nearby_node(int apicid) | 270 | static int nearby_node(int apicid) |
271 | { | 271 | { |
272 | int i, node; | 272 | int i, node; |
273 | 273 | ||
@@ -292,7 +292,7 @@ static int __cpuinit nearby_node(int apicid) | |||
292 | * (2) AMD processors supporting compute units | 292 | * (2) AMD processors supporting compute units |
293 | */ | 293 | */ |
294 | #ifdef CONFIG_X86_HT | 294 | #ifdef CONFIG_X86_HT |
295 | static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) | 295 | static void amd_get_topology(struct cpuinfo_x86 *c) |
296 | { | 296 | { |
297 | u32 nodes, cores_per_cu = 1; | 297 | u32 nodes, cores_per_cu = 1; |
298 | u8 node_id; | 298 | u8 node_id; |
@@ -342,7 +342,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c) | |||
342 | * On a AMD dual core setup the lower bits of the APIC id distingush the cores. | 342 | * On a AMD dual core setup the lower bits of the APIC id distingush the cores. |
343 | * Assumes number of cores is a power of two. | 343 | * Assumes number of cores is a power of two. |
344 | */ | 344 | */ |
345 | static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c) | 345 | static void amd_detect_cmp(struct cpuinfo_x86 *c) |
346 | { | 346 | { |
347 | #ifdef CONFIG_X86_HT | 347 | #ifdef CONFIG_X86_HT |
348 | unsigned bits; | 348 | unsigned bits; |
@@ -369,7 +369,7 @@ u16 amd_get_nb_id(int cpu) | |||
369 | } | 369 | } |
370 | EXPORT_SYMBOL_GPL(amd_get_nb_id); | 370 | EXPORT_SYMBOL_GPL(amd_get_nb_id); |
371 | 371 | ||
372 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | 372 | static void srat_detect_node(struct cpuinfo_x86 *c) |
373 | { | 373 | { |
374 | #ifdef CONFIG_NUMA | 374 | #ifdef CONFIG_NUMA |
375 | int cpu = smp_processor_id(); | 375 | int cpu = smp_processor_id(); |
@@ -421,7 +421,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | |||
421 | #endif | 421 | #endif |
422 | } | 422 | } |
423 | 423 | ||
424 | static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) | 424 | static void early_init_amd_mc(struct cpuinfo_x86 *c) |
425 | { | 425 | { |
426 | #ifdef CONFIG_X86_HT | 426 | #ifdef CONFIG_X86_HT |
427 | unsigned bits, ecx; | 427 | unsigned bits, ecx; |
@@ -447,7 +447,7 @@ static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c) | |||
447 | #endif | 447 | #endif |
448 | } | 448 | } |
449 | 449 | ||
450 | static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c) | 450 | static void bsp_init_amd(struct cpuinfo_x86 *c) |
451 | { | 451 | { |
452 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { | 452 | if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) { |
453 | 453 | ||
@@ -475,7 +475,7 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c) | |||
475 | } | 475 | } |
476 | } | 476 | } |
477 | 477 | ||
478 | static void __cpuinit early_init_amd(struct cpuinfo_x86 *c) | 478 | static void early_init_amd(struct cpuinfo_x86 *c) |
479 | { | 479 | { |
480 | early_init_amd_mc(c); | 480 | early_init_amd_mc(c); |
481 | 481 | ||
@@ -514,7 +514,7 @@ static const int amd_erratum_383[]; | |||
514 | static const int amd_erratum_400[]; | 514 | static const int amd_erratum_400[]; |
515 | static bool cpu_has_amd_erratum(const int *erratum); | 515 | static bool cpu_has_amd_erratum(const int *erratum); |
516 | 516 | ||
517 | static void __cpuinit init_amd(struct cpuinfo_x86 *c) | 517 | static void init_amd(struct cpuinfo_x86 *c) |
518 | { | 518 | { |
519 | u32 dummy; | 519 | u32 dummy; |
520 | unsigned long long value; | 520 | unsigned long long value; |
@@ -740,8 +740,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) | |||
740 | } | 740 | } |
741 | 741 | ||
742 | #ifdef CONFIG_X86_32 | 742 | #ifdef CONFIG_X86_32 |
743 | static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, | 743 | static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
744 | unsigned int size) | ||
745 | { | 744 | { |
746 | /* AMD errata T13 (order #21922) */ | 745 | /* AMD errata T13 (order #21922) */ |
747 | if ((c->x86 == 6)) { | 746 | if ((c->x86 == 6)) { |
@@ -757,7 +756,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, | |||
757 | } | 756 | } |
758 | #endif | 757 | #endif |
759 | 758 | ||
760 | static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c) | 759 | static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c) |
761 | { | 760 | { |
762 | tlb_flushall_shift = 5; | 761 | tlb_flushall_shift = 5; |
763 | 762 | ||
@@ -765,7 +764,7 @@ static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c) | |||
765 | tlb_flushall_shift = 4; | 764 | tlb_flushall_shift = 4; |
766 | } | 765 | } |
767 | 766 | ||
768 | static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c) | 767 | static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c) |
769 | { | 768 | { |
770 | u32 ebx, eax, ecx, edx; | 769 | u32 ebx, eax, ecx, edx; |
771 | u16 mask = 0xfff; | 770 | u16 mask = 0xfff; |
@@ -820,7 +819,7 @@ static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c) | |||
820 | cpu_set_tlb_flushall_shift(c); | 819 | cpu_set_tlb_flushall_shift(c); |
821 | } | 820 | } |
822 | 821 | ||
823 | static const struct cpu_dev __cpuinitconst amd_cpu_dev = { | 822 | static const struct cpu_dev amd_cpu_dev = { |
824 | .c_vendor = "AMD", | 823 | .c_vendor = "AMD", |
825 | .c_ident = { "AuthenticAMD" }, | 824 | .c_ident = { "AuthenticAMD" }, |
826 | #ifdef CONFIG_X86_32 | 825 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 159103c0b1f4..fbf6c3bc2400 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c | |||
@@ -11,7 +11,7 @@ | |||
11 | 11 | ||
12 | #ifdef CONFIG_X86_OOSTORE | 12 | #ifdef CONFIG_X86_OOSTORE |
13 | 13 | ||
14 | static u32 __cpuinit power2(u32 x) | 14 | static u32 power2(u32 x) |
15 | { | 15 | { |
16 | u32 s = 1; | 16 | u32 s = 1; |
17 | 17 | ||
@@ -25,7 +25,7 @@ static u32 __cpuinit power2(u32 x) | |||
25 | /* | 25 | /* |
26 | * Set up an actual MCR | 26 | * Set up an actual MCR |
27 | */ | 27 | */ |
28 | static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) | 28 | static void centaur_mcr_insert(int reg, u32 base, u32 size, int key) |
29 | { | 29 | { |
30 | u32 lo, hi; | 30 | u32 lo, hi; |
31 | 31 | ||
@@ -42,7 +42,7 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key) | |||
42 | * | 42 | * |
43 | * Shortcut: We know you can't put 4Gig of RAM on a winchip | 43 | * Shortcut: We know you can't put 4Gig of RAM on a winchip |
44 | */ | 44 | */ |
45 | static u32 __cpuinit ramtop(void) | 45 | static u32 ramtop(void) |
46 | { | 46 | { |
47 | u32 clip = 0xFFFFFFFFUL; | 47 | u32 clip = 0xFFFFFFFFUL; |
48 | u32 top = 0; | 48 | u32 top = 0; |
@@ -91,7 +91,7 @@ static u32 __cpuinit ramtop(void) | |||
91 | /* | 91 | /* |
92 | * Compute a set of MCR's to give maximum coverage | 92 | * Compute a set of MCR's to give maximum coverage |
93 | */ | 93 | */ |
94 | static int __cpuinit centaur_mcr_compute(int nr, int key) | 94 | static int centaur_mcr_compute(int nr, int key) |
95 | { | 95 | { |
96 | u32 mem = ramtop(); | 96 | u32 mem = ramtop(); |
97 | u32 root = power2(mem); | 97 | u32 root = power2(mem); |
@@ -157,7 +157,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key) | |||
157 | return ct; | 157 | return ct; |
158 | } | 158 | } |
159 | 159 | ||
160 | static void __cpuinit centaur_create_optimal_mcr(void) | 160 | static void centaur_create_optimal_mcr(void) |
161 | { | 161 | { |
162 | int used; | 162 | int used; |
163 | int i; | 163 | int i; |
@@ -181,7 +181,7 @@ static void __cpuinit centaur_create_optimal_mcr(void) | |||
181 | wrmsr(MSR_IDT_MCR0+i, 0, 0); | 181 | wrmsr(MSR_IDT_MCR0+i, 0, 0); |
182 | } | 182 | } |
183 | 183 | ||
184 | static void __cpuinit winchip2_create_optimal_mcr(void) | 184 | static void winchip2_create_optimal_mcr(void) |
185 | { | 185 | { |
186 | u32 lo, hi; | 186 | u32 lo, hi; |
187 | int used; | 187 | int used; |
@@ -217,7 +217,7 @@ static void __cpuinit winchip2_create_optimal_mcr(void) | |||
217 | /* | 217 | /* |
218 | * Handle the MCR key on the Winchip 2. | 218 | * Handle the MCR key on the Winchip 2. |
219 | */ | 219 | */ |
220 | static void __cpuinit winchip2_unprotect_mcr(void) | 220 | static void winchip2_unprotect_mcr(void) |
221 | { | 221 | { |
222 | u32 lo, hi; | 222 | u32 lo, hi; |
223 | u32 key; | 223 | u32 key; |
@@ -229,7 +229,7 @@ static void __cpuinit winchip2_unprotect_mcr(void) | |||
229 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); | 229 | wrmsr(MSR_IDT_MCR_CTRL, lo, hi); |
230 | } | 230 | } |
231 | 231 | ||
232 | static void __cpuinit winchip2_protect_mcr(void) | 232 | static void winchip2_protect_mcr(void) |
233 | { | 233 | { |
234 | u32 lo, hi; | 234 | u32 lo, hi; |
235 | 235 | ||
@@ -247,7 +247,7 @@ static void __cpuinit winchip2_protect_mcr(void) | |||
247 | #define RNG_ENABLED (1 << 3) | 247 | #define RNG_ENABLED (1 << 3) |
248 | #define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */ | 248 | #define RNG_ENABLE (1 << 6) /* MSR_VIA_RNG */ |
249 | 249 | ||
250 | static void __cpuinit init_c3(struct cpuinfo_x86 *c) | 250 | static void init_c3(struct cpuinfo_x86 *c) |
251 | { | 251 | { |
252 | u32 lo, hi; | 252 | u32 lo, hi; |
253 | 253 | ||
@@ -318,7 +318,7 @@ enum { | |||
318 | EAMD3D = 1<<20, | 318 | EAMD3D = 1<<20, |
319 | }; | 319 | }; |
320 | 320 | ||
321 | static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) | 321 | static void early_init_centaur(struct cpuinfo_x86 *c) |
322 | { | 322 | { |
323 | switch (c->x86) { | 323 | switch (c->x86) { |
324 | #ifdef CONFIG_X86_32 | 324 | #ifdef CONFIG_X86_32 |
@@ -337,7 +337,7 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c) | |||
337 | #endif | 337 | #endif |
338 | } | 338 | } |
339 | 339 | ||
340 | static void __cpuinit init_centaur(struct cpuinfo_x86 *c) | 340 | static void init_centaur(struct cpuinfo_x86 *c) |
341 | { | 341 | { |
342 | #ifdef CONFIG_X86_32 | 342 | #ifdef CONFIG_X86_32 |
343 | char *name; | 343 | char *name; |
@@ -468,7 +468,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c) | |||
468 | #endif | 468 | #endif |
469 | } | 469 | } |
470 | 470 | ||
471 | static unsigned int __cpuinit | 471 | static unsigned int |
472 | centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) | 472 | centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
473 | { | 473 | { |
474 | #ifdef CONFIG_X86_32 | 474 | #ifdef CONFIG_X86_32 |
@@ -488,7 +488,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size) | |||
488 | return size; | 488 | return size; |
489 | } | 489 | } |
490 | 490 | ||
491 | static const struct cpu_dev __cpuinitconst centaur_cpu_dev = { | 491 | static const struct cpu_dev centaur_cpu_dev = { |
492 | .c_vendor = "Centaur", | 492 | .c_vendor = "Centaur", |
493 | .c_ident = { "CentaurHauls" }, | 493 | .c_ident = { "CentaurHauls" }, |
494 | .c_early_init = early_init_centaur, | 494 | .c_early_init = early_init_centaur, |
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 548bd039784e..25eb2747b063 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c | |||
@@ -63,7 +63,7 @@ void __init setup_cpu_local_masks(void) | |||
63 | alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); | 63 | alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); |
64 | } | 64 | } |
65 | 65 | ||
66 | static void __cpuinit default_init(struct cpuinfo_x86 *c) | 66 | static void default_init(struct cpuinfo_x86 *c) |
67 | { | 67 | { |
68 | #ifdef CONFIG_X86_64 | 68 | #ifdef CONFIG_X86_64 |
69 | cpu_detect_cache_sizes(c); | 69 | cpu_detect_cache_sizes(c); |
@@ -80,13 +80,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c) | |||
80 | #endif | 80 | #endif |
81 | } | 81 | } |
82 | 82 | ||
83 | static const struct cpu_dev __cpuinitconst default_cpu = { | 83 | static const struct cpu_dev default_cpu = { |
84 | .c_init = default_init, | 84 | .c_init = default_init, |
85 | .c_vendor = "Unknown", | 85 | .c_vendor = "Unknown", |
86 | .c_x86_vendor = X86_VENDOR_UNKNOWN, | 86 | .c_x86_vendor = X86_VENDOR_UNKNOWN, |
87 | }; | 87 | }; |
88 | 88 | ||
89 | static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; | 89 | static const struct cpu_dev *this_cpu = &default_cpu; |
90 | 90 | ||
91 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { | 91 | DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { |
92 | #ifdef CONFIG_X86_64 | 92 | #ifdef CONFIG_X86_64 |
@@ -160,8 +160,8 @@ static int __init x86_xsaveopt_setup(char *s) | |||
160 | __setup("noxsaveopt", x86_xsaveopt_setup); | 160 | __setup("noxsaveopt", x86_xsaveopt_setup); |
161 | 161 | ||
162 | #ifdef CONFIG_X86_32 | 162 | #ifdef CONFIG_X86_32 |
163 | static int cachesize_override __cpuinitdata = -1; | 163 | static int cachesize_override = -1; |
164 | static int disable_x86_serial_nr __cpuinitdata = 1; | 164 | static int disable_x86_serial_nr = 1; |
165 | 165 | ||
166 | static int __init cachesize_setup(char *str) | 166 | static int __init cachesize_setup(char *str) |
167 | { | 167 | { |
@@ -215,12 +215,12 @@ static inline int flag_is_changeable_p(u32 flag) | |||
215 | } | 215 | } |
216 | 216 | ||
217 | /* Probe for the CPUID instruction */ | 217 | /* Probe for the CPUID instruction */ |
218 | int __cpuinit have_cpuid_p(void) | 218 | int have_cpuid_p(void) |
219 | { | 219 | { |
220 | return flag_is_changeable_p(X86_EFLAGS_ID); | 220 | return flag_is_changeable_p(X86_EFLAGS_ID); |
221 | } | 221 | } |
222 | 222 | ||
223 | static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c) | 223 | static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c) |
224 | { | 224 | { |
225 | unsigned long lo, hi; | 225 | unsigned long lo, hi; |
226 | 226 | ||
@@ -298,7 +298,7 @@ struct cpuid_dependent_feature { | |||
298 | u32 level; | 298 | u32 level; |
299 | }; | 299 | }; |
300 | 300 | ||
301 | static const struct cpuid_dependent_feature __cpuinitconst | 301 | static const struct cpuid_dependent_feature |
302 | cpuid_dependent_features[] = { | 302 | cpuid_dependent_features[] = { |
303 | { X86_FEATURE_MWAIT, 0x00000005 }, | 303 | { X86_FEATURE_MWAIT, 0x00000005 }, |
304 | { X86_FEATURE_DCA, 0x00000009 }, | 304 | { X86_FEATURE_DCA, 0x00000009 }, |
@@ -306,7 +306,7 @@ cpuid_dependent_features[] = { | |||
306 | { 0, 0 } | 306 | { 0, 0 } |
307 | }; | 307 | }; |
308 | 308 | ||
309 | static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) | 309 | static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) |
310 | { | 310 | { |
311 | const struct cpuid_dependent_feature *df; | 311 | const struct cpuid_dependent_feature *df; |
312 | 312 | ||
@@ -344,7 +344,7 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn) | |||
344 | */ | 344 | */ |
345 | 345 | ||
346 | /* Look up CPU names by table lookup. */ | 346 | /* Look up CPU names by table lookup. */ |
347 | static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c) | 347 | static const char *table_lookup_model(struct cpuinfo_x86 *c) |
348 | { | 348 | { |
349 | const struct cpu_model_info *info; | 349 | const struct cpu_model_info *info; |
350 | 350 | ||
@@ -364,8 +364,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c) | |||
364 | return NULL; /* Not found */ | 364 | return NULL; /* Not found */ |
365 | } | 365 | } |
366 | 366 | ||
367 | __u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata; | 367 | __u32 cpu_caps_cleared[NCAPINTS]; |
368 | __u32 cpu_caps_set[NCAPINTS] __cpuinitdata; | 368 | __u32 cpu_caps_set[NCAPINTS]; |
369 | 369 | ||
370 | void load_percpu_segment(int cpu) | 370 | void load_percpu_segment(int cpu) |
371 | { | 371 | { |
@@ -394,9 +394,9 @@ void switch_to_new_gdt(int cpu) | |||
394 | load_percpu_segment(cpu); | 394 | load_percpu_segment(cpu); |
395 | } | 395 | } |
396 | 396 | ||
397 | static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; | 397 | static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {}; |
398 | 398 | ||
399 | static void __cpuinit get_model_name(struct cpuinfo_x86 *c) | 399 | static void get_model_name(struct cpuinfo_x86 *c) |
400 | { | 400 | { |
401 | unsigned int *v; | 401 | unsigned int *v; |
402 | char *p, *q; | 402 | char *p, *q; |
@@ -425,7 +425,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c) | |||
425 | } | 425 | } |
426 | } | 426 | } |
427 | 427 | ||
428 | void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c) | 428 | void cpu_detect_cache_sizes(struct cpuinfo_x86 *c) |
429 | { | 429 | { |
430 | unsigned int n, dummy, ebx, ecx, edx, l2size; | 430 | unsigned int n, dummy, ebx, ecx, edx, l2size; |
431 | 431 | ||
@@ -479,7 +479,7 @@ u16 __read_mostly tlb_lld_4m[NR_INFO]; | |||
479 | */ | 479 | */ |
480 | s8 __read_mostly tlb_flushall_shift = -1; | 480 | s8 __read_mostly tlb_flushall_shift = -1; |
481 | 481 | ||
482 | void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c) | 482 | void cpu_detect_tlb(struct cpuinfo_x86 *c) |
483 | { | 483 | { |
484 | if (this_cpu->c_detect_tlb) | 484 | if (this_cpu->c_detect_tlb) |
485 | this_cpu->c_detect_tlb(c); | 485 | this_cpu->c_detect_tlb(c); |
@@ -493,7 +493,7 @@ void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c) | |||
493 | tlb_flushall_shift); | 493 | tlb_flushall_shift); |
494 | } | 494 | } |
495 | 495 | ||
496 | void __cpuinit detect_ht(struct cpuinfo_x86 *c) | 496 | void detect_ht(struct cpuinfo_x86 *c) |
497 | { | 497 | { |
498 | #ifdef CONFIG_X86_HT | 498 | #ifdef CONFIG_X86_HT |
499 | u32 eax, ebx, ecx, edx; | 499 | u32 eax, ebx, ecx, edx; |
@@ -544,7 +544,7 @@ out: | |||
544 | #endif | 544 | #endif |
545 | } | 545 | } |
546 | 546 | ||
547 | static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | 547 | static void get_cpu_vendor(struct cpuinfo_x86 *c) |
548 | { | 548 | { |
549 | char *v = c->x86_vendor_id; | 549 | char *v = c->x86_vendor_id; |
550 | int i; | 550 | int i; |
@@ -571,7 +571,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c) | |||
571 | this_cpu = &default_cpu; | 571 | this_cpu = &default_cpu; |
572 | } | 572 | } |
573 | 573 | ||
574 | void __cpuinit cpu_detect(struct cpuinfo_x86 *c) | 574 | void cpu_detect(struct cpuinfo_x86 *c) |
575 | { | 575 | { |
576 | /* Get vendor name */ | 576 | /* Get vendor name */ |
577 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, | 577 | cpuid(0x00000000, (unsigned int *)&c->cpuid_level, |
@@ -601,7 +601,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c) | |||
601 | } | 601 | } |
602 | } | 602 | } |
603 | 603 | ||
604 | void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | 604 | void get_cpu_cap(struct cpuinfo_x86 *c) |
605 | { | 605 | { |
606 | u32 tfms, xlvl; | 606 | u32 tfms, xlvl; |
607 | u32 ebx; | 607 | u32 ebx; |
@@ -652,7 +652,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c) | |||
652 | init_scattered_cpuid_features(c); | 652 | init_scattered_cpuid_features(c); |
653 | } | 653 | } |
654 | 654 | ||
655 | static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c) | 655 | static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c) |
656 | { | 656 | { |
657 | #ifdef CONFIG_X86_32 | 657 | #ifdef CONFIG_X86_32 |
658 | int i; | 658 | int i; |
@@ -769,7 +769,7 @@ void __init early_cpu_init(void) | |||
769 | * unless we can find a reliable way to detect all the broken cases. | 769 | * unless we can find a reliable way to detect all the broken cases. |
770 | * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). | 770 | * Enable it explicitly on 64-bit for non-constant inputs of cpu_has(). |
771 | */ | 771 | */ |
772 | static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | 772 | static void detect_nopl(struct cpuinfo_x86 *c) |
773 | { | 773 | { |
774 | #ifdef CONFIG_X86_32 | 774 | #ifdef CONFIG_X86_32 |
775 | clear_cpu_cap(c, X86_FEATURE_NOPL); | 775 | clear_cpu_cap(c, X86_FEATURE_NOPL); |
@@ -778,7 +778,7 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c) | |||
778 | #endif | 778 | #endif |
779 | } | 779 | } |
780 | 780 | ||
781 | static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | 781 | static void generic_identify(struct cpuinfo_x86 *c) |
782 | { | 782 | { |
783 | c->extended_cpuid_level = 0; | 783 | c->extended_cpuid_level = 0; |
784 | 784 | ||
@@ -815,7 +815,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c) | |||
815 | /* | 815 | /* |
816 | * This does the hard work of actually picking apart the CPU stuff... | 816 | * This does the hard work of actually picking apart the CPU stuff... |
817 | */ | 817 | */ |
818 | static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) | 818 | static void identify_cpu(struct cpuinfo_x86 *c) |
819 | { | 819 | { |
820 | int i; | 820 | int i; |
821 | 821 | ||
@@ -960,7 +960,7 @@ void __init identify_boot_cpu(void) | |||
960 | cpu_detect_tlb(&boot_cpu_data); | 960 | cpu_detect_tlb(&boot_cpu_data); |
961 | } | 961 | } |
962 | 962 | ||
963 | void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c) | 963 | void identify_secondary_cpu(struct cpuinfo_x86 *c) |
964 | { | 964 | { |
965 | BUG_ON(c == &boot_cpu_data); | 965 | BUG_ON(c == &boot_cpu_data); |
966 | identify_cpu(c); | 966 | identify_cpu(c); |
@@ -975,14 +975,14 @@ struct msr_range { | |||
975 | unsigned max; | 975 | unsigned max; |
976 | }; | 976 | }; |
977 | 977 | ||
978 | static const struct msr_range msr_range_array[] __cpuinitconst = { | 978 | static const struct msr_range msr_range_array[] = { |
979 | { 0x00000000, 0x00000418}, | 979 | { 0x00000000, 0x00000418}, |
980 | { 0xc0000000, 0xc000040b}, | 980 | { 0xc0000000, 0xc000040b}, |
981 | { 0xc0010000, 0xc0010142}, | 981 | { 0xc0010000, 0xc0010142}, |
982 | { 0xc0011000, 0xc001103b}, | 982 | { 0xc0011000, 0xc001103b}, |
983 | }; | 983 | }; |
984 | 984 | ||
985 | static void __cpuinit __print_cpu_msr(void) | 985 | static void __print_cpu_msr(void) |
986 | { | 986 | { |
987 | unsigned index_min, index_max; | 987 | unsigned index_min, index_max; |
988 | unsigned index; | 988 | unsigned index; |
@@ -1001,7 +1001,7 @@ static void __cpuinit __print_cpu_msr(void) | |||
1001 | } | 1001 | } |
1002 | } | 1002 | } |
1003 | 1003 | ||
1004 | static int show_msr __cpuinitdata; | 1004 | static int show_msr; |
1005 | 1005 | ||
1006 | static __init int setup_show_msr(char *arg) | 1006 | static __init int setup_show_msr(char *arg) |
1007 | { | 1007 | { |
@@ -1022,7 +1022,7 @@ static __init int setup_noclflush(char *arg) | |||
1022 | } | 1022 | } |
1023 | __setup("noclflush", setup_noclflush); | 1023 | __setup("noclflush", setup_noclflush); |
1024 | 1024 | ||
1025 | void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | 1025 | void print_cpu_info(struct cpuinfo_x86 *c) |
1026 | { | 1026 | { |
1027 | const char *vendor = NULL; | 1027 | const char *vendor = NULL; |
1028 | 1028 | ||
@@ -1051,7 +1051,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c) | |||
1051 | print_cpu_msr(c); | 1051 | print_cpu_msr(c); |
1052 | } | 1052 | } |
1053 | 1053 | ||
1054 | void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c) | 1054 | void print_cpu_msr(struct cpuinfo_x86 *c) |
1055 | { | 1055 | { |
1056 | if (c->cpu_index < show_msr) | 1056 | if (c->cpu_index < show_msr) |
1057 | __print_cpu_msr(); | 1057 | __print_cpu_msr(); |
@@ -1216,7 +1216,7 @@ static void dbg_restore_debug_regs(void) | |||
1216 | */ | 1216 | */ |
1217 | #ifdef CONFIG_X86_64 | 1217 | #ifdef CONFIG_X86_64 |
1218 | 1218 | ||
1219 | void __cpuinit cpu_init(void) | 1219 | void cpu_init(void) |
1220 | { | 1220 | { |
1221 | struct orig_ist *oist; | 1221 | struct orig_ist *oist; |
1222 | struct task_struct *me; | 1222 | struct task_struct *me; |
@@ -1315,7 +1315,7 @@ void __cpuinit cpu_init(void) | |||
1315 | 1315 | ||
1316 | #else | 1316 | #else |
1317 | 1317 | ||
1318 | void __cpuinit cpu_init(void) | 1318 | void cpu_init(void) |
1319 | { | 1319 | { |
1320 | int cpu = smp_processor_id(); | 1320 | int cpu = smp_processor_id(); |
1321 | struct task_struct *curr = current; | 1321 | struct task_struct *curr = current; |
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 7582f475b163..d0969c75ab54 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c | |||
@@ -15,7 +15,7 @@ | |||
15 | /* | 15 | /* |
16 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU | 16 | * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU |
17 | */ | 17 | */ |
18 | static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | 18 | static void __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) |
19 | { | 19 | { |
20 | unsigned char ccr2, ccr3; | 20 | unsigned char ccr2, ccr3; |
21 | 21 | ||
@@ -44,7 +44,7 @@ static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
44 | } | 44 | } |
45 | } | 45 | } |
46 | 46 | ||
47 | static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | 47 | static void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) |
48 | { | 48 | { |
49 | unsigned long flags; | 49 | unsigned long flags; |
50 | 50 | ||
@@ -59,25 +59,25 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1) | |||
59 | * Actually since bugs.h doesn't even reference this perhaps someone should | 59 | * Actually since bugs.h doesn't even reference this perhaps someone should |
60 | * fix the documentation ??? | 60 | * fix the documentation ??? |
61 | */ | 61 | */ |
62 | static unsigned char Cx86_dir0_msb __cpuinitdata = 0; | 62 | static unsigned char Cx86_dir0_msb = 0; |
63 | 63 | ||
64 | static const char __cpuinitconst Cx86_model[][9] = { | 64 | static const char Cx86_model[][9] = { |
65 | "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", | 65 | "Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ", |
66 | "M II ", "Unknown" | 66 | "M II ", "Unknown" |
67 | }; | 67 | }; |
68 | static const char __cpuinitconst Cx486_name[][5] = { | 68 | static const char Cx486_name[][5] = { |
69 | "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", | 69 | "SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", |
70 | "SRx2", "DRx2" | 70 | "SRx2", "DRx2" |
71 | }; | 71 | }; |
72 | static const char __cpuinitconst Cx486S_name[][4] = { | 72 | static const char Cx486S_name[][4] = { |
73 | "S", "S2", "Se", "S2e" | 73 | "S", "S2", "Se", "S2e" |
74 | }; | 74 | }; |
75 | static const char __cpuinitconst Cx486D_name[][4] = { | 75 | static const char Cx486D_name[][4] = { |
76 | "DX", "DX2", "?", "?", "?", "DX4" | 76 | "DX", "DX2", "?", "?", "?", "DX4" |
77 | }; | 77 | }; |
78 | static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock"; | 78 | static char Cx86_cb[] = "?.5x Core/Bus Clock"; |
79 | static const char __cpuinitconst cyrix_model_mult1[] = "12??43"; | 79 | static const char cyrix_model_mult1[] = "12??43"; |
80 | static const char __cpuinitconst cyrix_model_mult2[] = "12233445"; | 80 | static const char cyrix_model_mult2[] = "12233445"; |
81 | 81 | ||
82 | /* | 82 | /* |
83 | * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old | 83 | * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old |
@@ -87,7 +87,7 @@ static const char __cpuinitconst cyrix_model_mult2[] = "12233445"; | |||
87 | * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP | 87 | * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP |
88 | */ | 88 | */ |
89 | 89 | ||
90 | static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) | 90 | static void check_cx686_slop(struct cpuinfo_x86 *c) |
91 | { | 91 | { |
92 | unsigned long flags; | 92 | unsigned long flags; |
93 | 93 | ||
@@ -112,7 +112,7 @@ static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c) | |||
112 | } | 112 | } |
113 | 113 | ||
114 | 114 | ||
115 | static void __cpuinit set_cx86_reorder(void) | 115 | static void set_cx86_reorder(void) |
116 | { | 116 | { |
117 | u8 ccr3; | 117 | u8 ccr3; |
118 | 118 | ||
@@ -127,7 +127,7 @@ static void __cpuinit set_cx86_reorder(void) | |||
127 | setCx86(CX86_CCR3, ccr3); | 127 | setCx86(CX86_CCR3, ccr3); |
128 | } | 128 | } |
129 | 129 | ||
130 | static void __cpuinit set_cx86_memwb(void) | 130 | static void set_cx86_memwb(void) |
131 | { | 131 | { |
132 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); | 132 | printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n"); |
133 | 133 | ||
@@ -143,7 +143,7 @@ static void __cpuinit set_cx86_memwb(void) | |||
143 | * Configure later MediaGX and/or Geode processor. | 143 | * Configure later MediaGX and/or Geode processor. |
144 | */ | 144 | */ |
145 | 145 | ||
146 | static void __cpuinit geode_configure(void) | 146 | static void geode_configure(void) |
147 | { | 147 | { |
148 | unsigned long flags; | 148 | unsigned long flags; |
149 | u8 ccr3; | 149 | u8 ccr3; |
@@ -166,7 +166,7 @@ static void __cpuinit geode_configure(void) | |||
166 | local_irq_restore(flags); | 166 | local_irq_restore(flags); |
167 | } | 167 | } |
168 | 168 | ||
169 | static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c) | 169 | static void early_init_cyrix(struct cpuinfo_x86 *c) |
170 | { | 170 | { |
171 | unsigned char dir0, dir0_msn, dir1 = 0; | 171 | unsigned char dir0, dir0_msn, dir1 = 0; |
172 | 172 | ||
@@ -185,7 +185,7 @@ static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c) | |||
185 | } | 185 | } |
186 | } | 186 | } |
187 | 187 | ||
188 | static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | 188 | static void init_cyrix(struct cpuinfo_x86 *c) |
189 | { | 189 | { |
190 | unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0; | 190 | unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0; |
191 | char *buf = c->x86_model_id; | 191 | char *buf = c->x86_model_id; |
@@ -356,7 +356,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c) | |||
356 | /* | 356 | /* |
357 | * Handle National Semiconductor branded processors | 357 | * Handle National Semiconductor branded processors |
358 | */ | 358 | */ |
359 | static void __cpuinit init_nsc(struct cpuinfo_x86 *c) | 359 | static void init_nsc(struct cpuinfo_x86 *c) |
360 | { | 360 | { |
361 | /* | 361 | /* |
362 | * There may be GX1 processors in the wild that are branded | 362 | * There may be GX1 processors in the wild that are branded |
@@ -405,7 +405,7 @@ static inline int test_cyrix_52div(void) | |||
405 | return (unsigned char) (test >> 8) == 0x02; | 405 | return (unsigned char) (test >> 8) == 0x02; |
406 | } | 406 | } |
407 | 407 | ||
408 | static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | 408 | static void cyrix_identify(struct cpuinfo_x86 *c) |
409 | { | 409 | { |
410 | /* Detect Cyrix with disabled CPUID */ | 410 | /* Detect Cyrix with disabled CPUID */ |
411 | if (c->x86 == 4 && test_cyrix_52div()) { | 411 | if (c->x86 == 4 && test_cyrix_52div()) { |
@@ -441,7 +441,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c) | |||
441 | } | 441 | } |
442 | } | 442 | } |
443 | 443 | ||
444 | static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = { | 444 | static const struct cpu_dev cyrix_cpu_dev = { |
445 | .c_vendor = "Cyrix", | 445 | .c_vendor = "Cyrix", |
446 | .c_ident = { "CyrixInstead" }, | 446 | .c_ident = { "CyrixInstead" }, |
447 | .c_early_init = early_init_cyrix, | 447 | .c_early_init = early_init_cyrix, |
@@ -452,7 +452,7 @@ static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = { | |||
452 | 452 | ||
453 | cpu_dev_register(cyrix_cpu_dev); | 453 | cpu_dev_register(cyrix_cpu_dev); |
454 | 454 | ||
455 | static const struct cpu_dev __cpuinitconst nsc_cpu_dev = { | 455 | static const struct cpu_dev nsc_cpu_dev = { |
456 | .c_vendor = "NSC", | 456 | .c_vendor = "NSC", |
457 | .c_ident = { "Geode by NSC" }, | 457 | .c_ident = { "Geode by NSC" }, |
458 | .c_init = init_nsc, | 458 | .c_init = init_nsc, |
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c index 1e7e84a02eba..87279212d318 100644 --- a/arch/x86/kernel/cpu/hypervisor.c +++ b/arch/x86/kernel/cpu/hypervisor.c | |||
@@ -60,7 +60,7 @@ detect_hypervisor_vendor(void) | |||
60 | } | 60 | } |
61 | } | 61 | } |
62 | 62 | ||
63 | void __cpuinit init_hypervisor(struct cpuinfo_x86 *c) | 63 | void init_hypervisor(struct cpuinfo_x86 *c) |
64 | { | 64 | { |
65 | if (x86_hyper && x86_hyper->set_cpu_features) | 65 | if (x86_hyper && x86_hyper->set_cpu_features) |
66 | x86_hyper->set_cpu_features(c); | 66 | x86_hyper->set_cpu_features(c); |
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 9b0c441c03f5..ec7299566f79 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c | |||
@@ -26,7 +26,7 @@ | |||
26 | #include <asm/apic.h> | 26 | #include <asm/apic.h> |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | 29 | static void early_init_intel(struct cpuinfo_x86 *c) |
30 | { | 30 | { |
31 | u64 misc_enable; | 31 | u64 misc_enable; |
32 | 32 | ||
@@ -163,7 +163,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c) | |||
163 | * This is called before we do cpu ident work | 163 | * This is called before we do cpu ident work |
164 | */ | 164 | */ |
165 | 165 | ||
166 | int __cpuinit ppro_with_ram_bug(void) | 166 | int ppro_with_ram_bug(void) |
167 | { | 167 | { |
168 | /* Uses data from early_cpu_detect now */ | 168 | /* Uses data from early_cpu_detect now */ |
169 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && | 169 | if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && |
@@ -176,7 +176,7 @@ int __cpuinit ppro_with_ram_bug(void) | |||
176 | return 0; | 176 | return 0; |
177 | } | 177 | } |
178 | 178 | ||
179 | static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) | 179 | static void intel_smp_check(struct cpuinfo_x86 *c) |
180 | { | 180 | { |
181 | /* calling is from identify_secondary_cpu() ? */ | 181 | /* calling is from identify_secondary_cpu() ? */ |
182 | if (!c->cpu_index) | 182 | if (!c->cpu_index) |
@@ -196,7 +196,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c) | |||
196 | } | 196 | } |
197 | } | 197 | } |
198 | 198 | ||
199 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | 199 | static void intel_workarounds(struct cpuinfo_x86 *c) |
200 | { | 200 | { |
201 | unsigned long lo, hi; | 201 | unsigned long lo, hi; |
202 | 202 | ||
@@ -275,12 +275,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | |||
275 | intel_smp_check(c); | 275 | intel_smp_check(c); |
276 | } | 276 | } |
277 | #else | 277 | #else |
278 | static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c) | 278 | static void intel_workarounds(struct cpuinfo_x86 *c) |
279 | { | 279 | { |
280 | } | 280 | } |
281 | #endif | 281 | #endif |
282 | 282 | ||
283 | static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | 283 | static void srat_detect_node(struct cpuinfo_x86 *c) |
284 | { | 284 | { |
285 | #ifdef CONFIG_NUMA | 285 | #ifdef CONFIG_NUMA |
286 | unsigned node; | 286 | unsigned node; |
@@ -300,7 +300,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c) | |||
300 | /* | 300 | /* |
301 | * find out the number of processor cores on the die | 301 | * find out the number of processor cores on the die |
302 | */ | 302 | */ |
303 | static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | 303 | static int intel_num_cpu_cores(struct cpuinfo_x86 *c) |
304 | { | 304 | { |
305 | unsigned int eax, ebx, ecx, edx; | 305 | unsigned int eax, ebx, ecx, edx; |
306 | 306 | ||
@@ -315,7 +315,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c) | |||
315 | return 1; | 315 | return 1; |
316 | } | 316 | } |
317 | 317 | ||
318 | static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) | 318 | static void detect_vmx_virtcap(struct cpuinfo_x86 *c) |
319 | { | 319 | { |
320 | /* Intel VMX MSR indicated features */ | 320 | /* Intel VMX MSR indicated features */ |
321 | #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 | 321 | #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW 0x00200000 |
@@ -353,7 +353,7 @@ static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c) | |||
353 | } | 353 | } |
354 | } | 354 | } |
355 | 355 | ||
356 | static void __cpuinit init_intel(struct cpuinfo_x86 *c) | 356 | static void init_intel(struct cpuinfo_x86 *c) |
357 | { | 357 | { |
358 | unsigned int l2 = 0; | 358 | unsigned int l2 = 0; |
359 | 359 | ||
@@ -472,7 +472,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c) | |||
472 | } | 472 | } |
473 | 473 | ||
474 | #ifdef CONFIG_X86_32 | 474 | #ifdef CONFIG_X86_32 |
475 | static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) | 475 | static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size) |
476 | { | 476 | { |
477 | /* | 477 | /* |
478 | * Intel PIII Tualatin. This comes in two flavours. | 478 | * Intel PIII Tualatin. This comes in two flavours. |
@@ -506,7 +506,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i | |||
506 | 506 | ||
507 | #define STLB_4K 0x41 | 507 | #define STLB_4K 0x41 |
508 | 508 | ||
509 | static const struct _tlb_table intel_tlb_table[] __cpuinitconst = { | 509 | static const struct _tlb_table intel_tlb_table[] = { |
510 | { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, | 510 | { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" }, |
511 | { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" }, | 511 | { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" }, |
512 | { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" }, | 512 | { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" }, |
@@ -536,7 +536,7 @@ static const struct _tlb_table intel_tlb_table[] __cpuinitconst = { | |||
536 | { 0x00, 0, 0 } | 536 | { 0x00, 0, 0 } |
537 | }; | 537 | }; |
538 | 538 | ||
539 | static void __cpuinit intel_tlb_lookup(const unsigned char desc) | 539 | static void intel_tlb_lookup(const unsigned char desc) |
540 | { | 540 | { |
541 | unsigned char k; | 541 | unsigned char k; |
542 | if (desc == 0) | 542 | if (desc == 0) |
@@ -605,7 +605,7 @@ static void __cpuinit intel_tlb_lookup(const unsigned char desc) | |||
605 | } | 605 | } |
606 | } | 606 | } |
607 | 607 | ||
608 | static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) | 608 | static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) |
609 | { | 609 | { |
610 | switch ((c->x86 << 8) + c->x86_model) { | 610 | switch ((c->x86 << 8) + c->x86_model) { |
611 | case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ | 611 | case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */ |
@@ -634,7 +634,7 @@ static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c) | |||
634 | } | 634 | } |
635 | } | 635 | } |
636 | 636 | ||
637 | static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c) | 637 | static void intel_detect_tlb(struct cpuinfo_x86 *c) |
638 | { | 638 | { |
639 | int i, j, n; | 639 | int i, j, n; |
640 | unsigned int regs[4]; | 640 | unsigned int regs[4]; |
@@ -661,7 +661,7 @@ static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c) | |||
661 | intel_tlb_flushall_shift_set(c); | 661 | intel_tlb_flushall_shift_set(c); |
662 | } | 662 | } |
663 | 663 | ||
664 | static const struct cpu_dev __cpuinitconst intel_cpu_dev = { | 664 | static const struct cpu_dev intel_cpu_dev = { |
665 | .c_vendor = "Intel", | 665 | .c_vendor = "Intel", |
666 | .c_ident = { "GenuineIntel" }, | 666 | .c_ident = { "GenuineIntel" }, |
667 | #ifdef CONFIG_X86_32 | 667 | #ifdef CONFIG_X86_32 |
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c index 8dc72dda66fe..1414c90feaba 100644 --- a/arch/x86/kernel/cpu/intel_cacheinfo.c +++ b/arch/x86/kernel/cpu/intel_cacheinfo.c | |||
@@ -37,7 +37,7 @@ struct _cache_table { | |||
37 | /* All the cache descriptor types we care about (no TLB or | 37 | /* All the cache descriptor types we care about (no TLB or |
38 | trace cache entries) */ | 38 | trace cache entries) */ |
39 | 39 | ||
40 | static const struct _cache_table __cpuinitconst cache_table[] = | 40 | static const struct _cache_table cache_table[] = |
41 | { | 41 | { |
42 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ | 42 | { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */ |
43 | { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ | 43 | { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */ |
@@ -203,7 +203,7 @@ union l3_cache { | |||
203 | unsigned val; | 203 | unsigned val; |
204 | }; | 204 | }; |
205 | 205 | ||
206 | static const unsigned short __cpuinitconst assocs[] = { | 206 | static const unsigned short assocs[] = { |
207 | [1] = 1, | 207 | [1] = 1, |
208 | [2] = 2, | 208 | [2] = 2, |
209 | [4] = 4, | 209 | [4] = 4, |
@@ -217,10 +217,10 @@ static const unsigned short __cpuinitconst assocs[] = { | |||
217 | [0xf] = 0xffff /* fully associative - no way to show this currently */ | 217 | [0xf] = 0xffff /* fully associative - no way to show this currently */ |
218 | }; | 218 | }; |
219 | 219 | ||
220 | static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 }; | 220 | static const unsigned char levels[] = { 1, 1, 2, 3 }; |
221 | static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 }; | 221 | static const unsigned char types[] = { 1, 2, 3, 3 }; |
222 | 222 | ||
223 | static void __cpuinit | 223 | static void |
224 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, | 224 | amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax, |
225 | union _cpuid4_leaf_ebx *ebx, | 225 | union _cpuid4_leaf_ebx *ebx, |
226 | union _cpuid4_leaf_ecx *ecx) | 226 | union _cpuid4_leaf_ecx *ecx) |
@@ -302,7 +302,7 @@ struct _cache_attr { | |||
302 | /* | 302 | /* |
303 | * L3 cache descriptors | 303 | * L3 cache descriptors |
304 | */ | 304 | */ |
305 | static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) | 305 | static void amd_calc_l3_indices(struct amd_northbridge *nb) |
306 | { | 306 | { |
307 | struct amd_l3_cache *l3 = &nb->l3_cache; | 307 | struct amd_l3_cache *l3 = &nb->l3_cache; |
308 | unsigned int sc0, sc1, sc2, sc3; | 308 | unsigned int sc0, sc1, sc2, sc3; |
@@ -325,7 +325,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) | |||
325 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; | 325 | l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1; |
326 | } | 326 | } |
327 | 327 | ||
328 | static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) | 328 | static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) |
329 | { | 329 | { |
330 | int node; | 330 | int node; |
331 | 331 | ||
@@ -528,8 +528,7 @@ static struct _cache_attr subcaches = | |||
528 | #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ | 528 | #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */ |
529 | 529 | ||
530 | static int | 530 | static int |
531 | __cpuinit cpuid4_cache_lookup_regs(int index, | 531 | cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf) |
532 | struct _cpuid4_info_regs *this_leaf) | ||
533 | { | 532 | { |
534 | union _cpuid4_leaf_eax eax; | 533 | union _cpuid4_leaf_eax eax; |
535 | union _cpuid4_leaf_ebx ebx; | 534 | union _cpuid4_leaf_ebx ebx; |
@@ -560,7 +559,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index, | |||
560 | return 0; | 559 | return 0; |
561 | } | 560 | } |
562 | 561 | ||
563 | static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c) | 562 | static int find_num_cache_leaves(struct cpuinfo_x86 *c) |
564 | { | 563 | { |
565 | unsigned int eax, ebx, ecx, edx, op; | 564 | unsigned int eax, ebx, ecx, edx, op; |
566 | union _cpuid4_leaf_eax cache_eax; | 565 | union _cpuid4_leaf_eax cache_eax; |
@@ -580,7 +579,7 @@ static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c) | |||
580 | return i; | 579 | return i; |
581 | } | 580 | } |
582 | 581 | ||
583 | void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c) | 582 | void init_amd_cacheinfo(struct cpuinfo_x86 *c) |
584 | { | 583 | { |
585 | 584 | ||
586 | if (cpu_has_topoext) { | 585 | if (cpu_has_topoext) { |
@@ -593,7 +592,7 @@ void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c) | |||
593 | } | 592 | } |
594 | } | 593 | } |
595 | 594 | ||
596 | unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c) | 595 | unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c) |
597 | { | 596 | { |
598 | /* Cache sizes */ | 597 | /* Cache sizes */ |
599 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; | 598 | unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; |
@@ -744,7 +743,7 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); | |||
744 | 743 | ||
745 | #ifdef CONFIG_SMP | 744 | #ifdef CONFIG_SMP |
746 | 745 | ||
747 | static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) | 746 | static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) |
748 | { | 747 | { |
749 | struct _cpuid4_info *this_leaf; | 748 | struct _cpuid4_info *this_leaf; |
750 | int i, sibling; | 749 | int i, sibling; |
@@ -793,7 +792,7 @@ static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) | |||
793 | return 1; | 792 | return 1; |
794 | } | 793 | } |
795 | 794 | ||
796 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 795 | static void cache_shared_cpu_map_setup(unsigned int cpu, int index) |
797 | { | 796 | { |
798 | struct _cpuid4_info *this_leaf, *sibling_leaf; | 797 | struct _cpuid4_info *this_leaf, *sibling_leaf; |
799 | unsigned long num_threads_sharing; | 798 | unsigned long num_threads_sharing; |
@@ -828,7 +827,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | |||
828 | } | 827 | } |
829 | } | 828 | } |
830 | } | 829 | } |
831 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | 830 | static void cache_remove_shared_cpu_map(unsigned int cpu, int index) |
832 | { | 831 | { |
833 | struct _cpuid4_info *this_leaf, *sibling_leaf; | 832 | struct _cpuid4_info *this_leaf, *sibling_leaf; |
834 | int sibling; | 833 | int sibling; |
@@ -841,16 +840,16 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | |||
841 | } | 840 | } |
842 | } | 841 | } |
843 | #else | 842 | #else |
844 | static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) | 843 | static void cache_shared_cpu_map_setup(unsigned int cpu, int index) |
845 | { | 844 | { |
846 | } | 845 | } |
847 | 846 | ||
848 | static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index) | 847 | static void cache_remove_shared_cpu_map(unsigned int cpu, int index) |
849 | { | 848 | { |
850 | } | 849 | } |
851 | #endif | 850 | #endif |
852 | 851 | ||
853 | static void __cpuinit free_cache_attributes(unsigned int cpu) | 852 | static void free_cache_attributes(unsigned int cpu) |
854 | { | 853 | { |
855 | int i; | 854 | int i; |
856 | 855 | ||
@@ -861,7 +860,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu) | |||
861 | per_cpu(ici_cpuid4_info, cpu) = NULL; | 860 | per_cpu(ici_cpuid4_info, cpu) = NULL; |
862 | } | 861 | } |
863 | 862 | ||
864 | static void __cpuinit get_cpu_leaves(void *_retval) | 863 | static void get_cpu_leaves(void *_retval) |
865 | { | 864 | { |
866 | int j, *retval = _retval, cpu = smp_processor_id(); | 865 | int j, *retval = _retval, cpu = smp_processor_id(); |
867 | 866 | ||
@@ -881,7 +880,7 @@ static void __cpuinit get_cpu_leaves(void *_retval) | |||
881 | } | 880 | } |
882 | } | 881 | } |
883 | 882 | ||
884 | static int __cpuinit detect_cache_attributes(unsigned int cpu) | 883 | static int detect_cache_attributes(unsigned int cpu) |
885 | { | 884 | { |
886 | int retval; | 885 | int retval; |
887 | 886 | ||
@@ -1015,7 +1014,7 @@ static struct attribute *default_attrs[] = { | |||
1015 | }; | 1014 | }; |
1016 | 1015 | ||
1017 | #ifdef CONFIG_AMD_NB | 1016 | #ifdef CONFIG_AMD_NB |
1018 | static struct attribute ** __cpuinit amd_l3_attrs(void) | 1017 | static struct attribute **amd_l3_attrs(void) |
1019 | { | 1018 | { |
1020 | static struct attribute **attrs; | 1019 | static struct attribute **attrs; |
1021 | int n; | 1020 | int n; |
@@ -1091,7 +1090,7 @@ static struct kobj_type ktype_percpu_entry = { | |||
1091 | .sysfs_ops = &sysfs_ops, | 1090 | .sysfs_ops = &sysfs_ops, |
1092 | }; | 1091 | }; |
1093 | 1092 | ||
1094 | static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) | 1093 | static void cpuid4_cache_sysfs_exit(unsigned int cpu) |
1095 | { | 1094 | { |
1096 | kfree(per_cpu(ici_cache_kobject, cpu)); | 1095 | kfree(per_cpu(ici_cache_kobject, cpu)); |
1097 | kfree(per_cpu(ici_index_kobject, cpu)); | 1096 | kfree(per_cpu(ici_index_kobject, cpu)); |
@@ -1100,7 +1099,7 @@ static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu) | |||
1100 | free_cache_attributes(cpu); | 1099 | free_cache_attributes(cpu); |
1101 | } | 1100 | } |
1102 | 1101 | ||
1103 | static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) | 1102 | static int cpuid4_cache_sysfs_init(unsigned int cpu) |
1104 | { | 1103 | { |
1105 | int err; | 1104 | int err; |
1106 | 1105 | ||
@@ -1132,7 +1131,7 @@ err_out: | |||
1132 | static DECLARE_BITMAP(cache_dev_map, NR_CPUS); | 1131 | static DECLARE_BITMAP(cache_dev_map, NR_CPUS); |
1133 | 1132 | ||
1134 | /* Add/Remove cache interface for CPU device */ | 1133 | /* Add/Remove cache interface for CPU device */ |
1135 | static int __cpuinit cache_add_dev(struct device *dev) | 1134 | static int cache_add_dev(struct device *dev) |
1136 | { | 1135 | { |
1137 | unsigned int cpu = dev->id; | 1136 | unsigned int cpu = dev->id; |
1138 | unsigned long i, j; | 1137 | unsigned long i, j; |
@@ -1183,7 +1182,7 @@ static int __cpuinit cache_add_dev(struct device *dev) | |||
1183 | return 0; | 1182 | return 0; |
1184 | } | 1183 | } |
1185 | 1184 | ||
1186 | static void __cpuinit cache_remove_dev(struct device *dev) | 1185 | static void cache_remove_dev(struct device *dev) |
1187 | { | 1186 | { |
1188 | unsigned int cpu = dev->id; | 1187 | unsigned int cpu = dev->id; |
1189 | unsigned long i; | 1188 | unsigned long i; |
@@ -1200,8 +1199,8 @@ static void __cpuinit cache_remove_dev(struct device *dev) | |||
1200 | cpuid4_cache_sysfs_exit(cpu); | 1199 | cpuid4_cache_sysfs_exit(cpu); |
1201 | } | 1200 | } |
1202 | 1201 | ||
1203 | static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, | 1202 | static int cacheinfo_cpu_callback(struct notifier_block *nfb, |
1204 | unsigned long action, void *hcpu) | 1203 | unsigned long action, void *hcpu) |
1205 | { | 1204 | { |
1206 | unsigned int cpu = (unsigned long)hcpu; | 1205 | unsigned int cpu = (unsigned long)hcpu; |
1207 | struct device *dev; | 1206 | struct device *dev; |
@@ -1220,7 +1219,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, | |||
1220 | return NOTIFY_OK; | 1219 | return NOTIFY_OK; |
1221 | } | 1220 | } |
1222 | 1221 | ||
1223 | static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { | 1222 | static struct notifier_block cacheinfo_cpu_notifier = { |
1224 | .notifier_call = cacheinfo_cpu_callback, | 1223 | .notifier_call = cacheinfo_cpu_callback, |
1225 | }; | 1224 | }; |
1226 | 1225 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index bf49cdbb010f..87a65c939bcd 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -1363,7 +1363,7 @@ int mce_notify_irq(void) | |||
1363 | } | 1363 | } |
1364 | EXPORT_SYMBOL_GPL(mce_notify_irq); | 1364 | EXPORT_SYMBOL_GPL(mce_notify_irq); |
1365 | 1365 | ||
1366 | static int __cpuinit __mcheck_cpu_mce_banks_init(void) | 1366 | static int __mcheck_cpu_mce_banks_init(void) |
1367 | { | 1367 | { |
1368 | int i; | 1368 | int i; |
1369 | u8 num_banks = mca_cfg.banks; | 1369 | u8 num_banks = mca_cfg.banks; |
@@ -1384,7 +1384,7 @@ static int __cpuinit __mcheck_cpu_mce_banks_init(void) | |||
1384 | /* | 1384 | /* |
1385 | * Initialize Machine Checks for a CPU. | 1385 | * Initialize Machine Checks for a CPU. |
1386 | */ | 1386 | */ |
1387 | static int __cpuinit __mcheck_cpu_cap_init(void) | 1387 | static int __mcheck_cpu_cap_init(void) |
1388 | { | 1388 | { |
1389 | unsigned b; | 1389 | unsigned b; |
1390 | u64 cap; | 1390 | u64 cap; |
@@ -1483,7 +1483,7 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs) | |||
1483 | } | 1483 | } |
1484 | 1484 | ||
1485 | /* Add per CPU specific workarounds here */ | 1485 | /* Add per CPU specific workarounds here */ |
1486 | static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) | 1486 | static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) |
1487 | { | 1487 | { |
1488 | struct mca_config *cfg = &mca_cfg; | 1488 | struct mca_config *cfg = &mca_cfg; |
1489 | 1489 | ||
@@ -1593,7 +1593,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c) | |||
1593 | return 0; | 1593 | return 0; |
1594 | } | 1594 | } |
1595 | 1595 | ||
1596 | static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) | 1596 | static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c) |
1597 | { | 1597 | { |
1598 | if (c->x86 != 5) | 1598 | if (c->x86 != 5) |
1599 | return 0; | 1599 | return 0; |
@@ -1664,7 +1664,7 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) = | |||
1664 | * Called for each booted CPU to set up machine checks. | 1664 | * Called for each booted CPU to set up machine checks. |
1665 | * Must be called with preempt off: | 1665 | * Must be called with preempt off: |
1666 | */ | 1666 | */ |
1667 | void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c) | 1667 | void mcheck_cpu_init(struct cpuinfo_x86 *c) |
1668 | { | 1668 | { |
1669 | if (mca_cfg.disabled) | 1669 | if (mca_cfg.disabled) |
1670 | return; | 1670 | return; |
@@ -2082,7 +2082,6 @@ static struct bus_type mce_subsys = { | |||
2082 | 2082 | ||
2083 | DEFINE_PER_CPU(struct device *, mce_device); | 2083 | DEFINE_PER_CPU(struct device *, mce_device); |
2084 | 2084 | ||
2085 | __cpuinitdata | ||
2086 | void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); | 2085 | void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu); |
2087 | 2086 | ||
2088 | static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) | 2087 | static inline struct mce_bank *attr_to_bank(struct device_attribute *attr) |
@@ -2228,7 +2227,7 @@ static void mce_device_release(struct device *dev) | |||
2228 | } | 2227 | } |
2229 | 2228 | ||
2230 | /* Per cpu device init. All of the cpus still share the same ctrl bank: */ | 2229 | /* Per cpu device init. All of the cpus still share the same ctrl bank: */ |
2231 | static __cpuinit int mce_device_create(unsigned int cpu) | 2230 | static int mce_device_create(unsigned int cpu) |
2232 | { | 2231 | { |
2233 | struct device *dev; | 2232 | struct device *dev; |
2234 | int err; | 2233 | int err; |
@@ -2274,7 +2273,7 @@ error: | |||
2274 | return err; | 2273 | return err; |
2275 | } | 2274 | } |
2276 | 2275 | ||
2277 | static __cpuinit void mce_device_remove(unsigned int cpu) | 2276 | static void mce_device_remove(unsigned int cpu) |
2278 | { | 2277 | { |
2279 | struct device *dev = per_cpu(mce_device, cpu); | 2278 | struct device *dev = per_cpu(mce_device, cpu); |
2280 | int i; | 2279 | int i; |
@@ -2294,7 +2293,7 @@ static __cpuinit void mce_device_remove(unsigned int cpu) | |||
2294 | } | 2293 | } |
2295 | 2294 | ||
2296 | /* Make sure there are no machine checks on offlined CPUs. */ | 2295 | /* Make sure there are no machine checks on offlined CPUs. */ |
2297 | static void __cpuinit mce_disable_cpu(void *h) | 2296 | static void mce_disable_cpu(void *h) |
2298 | { | 2297 | { |
2299 | unsigned long action = *(unsigned long *)h; | 2298 | unsigned long action = *(unsigned long *)h; |
2300 | int i; | 2299 | int i; |
@@ -2312,7 +2311,7 @@ static void __cpuinit mce_disable_cpu(void *h) | |||
2312 | } | 2311 | } |
2313 | } | 2312 | } |
2314 | 2313 | ||
2315 | static void __cpuinit mce_reenable_cpu(void *h) | 2314 | static void mce_reenable_cpu(void *h) |
2316 | { | 2315 | { |
2317 | unsigned long action = *(unsigned long *)h; | 2316 | unsigned long action = *(unsigned long *)h; |
2318 | int i; | 2317 | int i; |
@@ -2331,7 +2330,7 @@ static void __cpuinit mce_reenable_cpu(void *h) | |||
2331 | } | 2330 | } |
2332 | 2331 | ||
2333 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | 2332 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
2334 | static int __cpuinit | 2333 | static int |
2335 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | 2334 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) |
2336 | { | 2335 | { |
2337 | unsigned int cpu = (unsigned long)hcpu; | 2336 | unsigned int cpu = (unsigned long)hcpu; |
@@ -2367,7 +2366,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
2367 | return NOTIFY_OK; | 2366 | return NOTIFY_OK; |
2368 | } | 2367 | } |
2369 | 2368 | ||
2370 | static struct notifier_block mce_cpu_notifier __cpuinitdata = { | 2369 | static struct notifier_block mce_cpu_notifier = { |
2371 | .notifier_call = mce_cpu_callback, | 2370 | .notifier_call = mce_cpu_callback, |
2372 | }; | 2371 | }; |
2373 | 2372 | ||
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 9cb52767999a..603df4f74640 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -458,10 +458,8 @@ static struct kobj_type threshold_ktype = { | |||
458 | .default_attrs = default_attrs, | 458 | .default_attrs = default_attrs, |
459 | }; | 459 | }; |
460 | 460 | ||
461 | static __cpuinit int allocate_threshold_blocks(unsigned int cpu, | 461 | static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank, |
462 | unsigned int bank, | 462 | unsigned int block, u32 address) |
463 | unsigned int block, | ||
464 | u32 address) | ||
465 | { | 463 | { |
466 | struct threshold_block *b = NULL; | 464 | struct threshold_block *b = NULL; |
467 | u32 low, high; | 465 | u32 low, high; |
@@ -543,7 +541,7 @@ out_free: | |||
543 | return err; | 541 | return err; |
544 | } | 542 | } |
545 | 543 | ||
546 | static __cpuinit int __threshold_add_blocks(struct threshold_bank *b) | 544 | static int __threshold_add_blocks(struct threshold_bank *b) |
547 | { | 545 | { |
548 | struct list_head *head = &b->blocks->miscj; | 546 | struct list_head *head = &b->blocks->miscj; |
549 | struct threshold_block *pos = NULL; | 547 | struct threshold_block *pos = NULL; |
@@ -567,7 +565,7 @@ static __cpuinit int __threshold_add_blocks(struct threshold_bank *b) | |||
567 | return err; | 565 | return err; |
568 | } | 566 | } |
569 | 567 | ||
570 | static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | 568 | static int threshold_create_bank(unsigned int cpu, unsigned int bank) |
571 | { | 569 | { |
572 | struct device *dev = per_cpu(mce_device, cpu); | 570 | struct device *dev = per_cpu(mce_device, cpu); |
573 | struct amd_northbridge *nb = NULL; | 571 | struct amd_northbridge *nb = NULL; |
@@ -632,7 +630,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | |||
632 | } | 630 | } |
633 | 631 | ||
634 | /* create dir/files for all valid threshold banks */ | 632 | /* create dir/files for all valid threshold banks */ |
635 | static __cpuinit int threshold_create_device(unsigned int cpu) | 633 | static int threshold_create_device(unsigned int cpu) |
636 | { | 634 | { |
637 | unsigned int bank; | 635 | unsigned int bank; |
638 | struct threshold_bank **bp; | 636 | struct threshold_bank **bp; |
@@ -736,7 +734,7 @@ static void threshold_remove_device(unsigned int cpu) | |||
736 | } | 734 | } |
737 | 735 | ||
738 | /* get notified when a cpu comes on/off */ | 736 | /* get notified when a cpu comes on/off */ |
739 | static void __cpuinit | 737 | static void |
740 | amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu) | 738 | amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu) |
741 | { | 739 | { |
742 | switch (action) { | 740 | switch (action) { |
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index 41e8e00a6637..3eec7de76efb 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c | |||
@@ -240,8 +240,7 @@ __setup("int_pln_enable", int_pln_enable_setup); | |||
240 | 240 | ||
241 | #ifdef CONFIG_SYSFS | 241 | #ifdef CONFIG_SYSFS |
242 | /* Add/Remove thermal_throttle interface for CPU device: */ | 242 | /* Add/Remove thermal_throttle interface for CPU device: */ |
243 | static __cpuinit int thermal_throttle_add_dev(struct device *dev, | 243 | static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu) |
244 | unsigned int cpu) | ||
245 | { | 244 | { |
246 | int err; | 245 | int err; |
247 | struct cpuinfo_x86 *c = &cpu_data(cpu); | 246 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
@@ -267,7 +266,7 @@ static __cpuinit int thermal_throttle_add_dev(struct device *dev, | |||
267 | return err; | 266 | return err; |
268 | } | 267 | } |
269 | 268 | ||
270 | static __cpuinit void thermal_throttle_remove_dev(struct device *dev) | 269 | static void thermal_throttle_remove_dev(struct device *dev) |
271 | { | 270 | { |
272 | sysfs_remove_group(&dev->kobj, &thermal_attr_group); | 271 | sysfs_remove_group(&dev->kobj, &thermal_attr_group); |
273 | } | 272 | } |
@@ -276,7 +275,7 @@ static __cpuinit void thermal_throttle_remove_dev(struct device *dev) | |||
276 | static DEFINE_MUTEX(therm_cpu_lock); | 275 | static DEFINE_MUTEX(therm_cpu_lock); |
277 | 276 | ||
278 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | 277 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ |
279 | static __cpuinit int | 278 | static int |
280 | thermal_throttle_cpu_callback(struct notifier_block *nfb, | 279 | thermal_throttle_cpu_callback(struct notifier_block *nfb, |
281 | unsigned long action, | 280 | unsigned long action, |
282 | void *hcpu) | 281 | void *hcpu) |
@@ -307,7 +306,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb, | |||
307 | return notifier_from_errno(err); | 306 | return notifier_from_errno(err); |
308 | } | 307 | } |
309 | 308 | ||
310 | static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata = | 309 | static struct notifier_block thermal_throttle_cpu_notifier = |
311 | { | 310 | { |
312 | .notifier_call = thermal_throttle_cpu_callback, | 311 | .notifier_call = thermal_throttle_cpu_callback, |
313 | }; | 312 | }; |
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c index 9e581c5cf6d0..a7c7305030cc 100644 --- a/arch/x86/kernel/cpu/perf_event.c +++ b/arch/x86/kernel/cpu/perf_event.c | |||
@@ -1295,7 +1295,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs) | |||
1295 | struct event_constraint emptyconstraint; | 1295 | struct event_constraint emptyconstraint; |
1296 | struct event_constraint unconstrained; | 1296 | struct event_constraint unconstrained; |
1297 | 1297 | ||
1298 | static int __cpuinit | 1298 | static int |
1299 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | 1299 | x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
1300 | { | 1300 | { |
1301 | unsigned int cpu = (long)hcpu; | 1301 | unsigned int cpu = (long)hcpu; |
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c index 5f0581e713c2..e09f0bfb7b8f 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c +++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c | |||
@@ -851,7 +851,7 @@ static void clear_APIC_ibs(void *dummy) | |||
851 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); | 851 | setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); |
852 | } | 852 | } |
853 | 853 | ||
854 | static int __cpuinit | 854 | static int |
855 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | 855 | perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) |
856 | { | 856 | { |
857 | switch (action & ~CPU_TASKS_FROZEN) { | 857 | switch (action & ~CPU_TASKS_FROZEN) { |
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c index c0c661adf03e..754291adec33 100644 --- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c | |||
@@ -288,13 +288,13 @@ static struct pmu amd_l2_pmu = { | |||
288 | .read = amd_uncore_read, | 288 | .read = amd_uncore_read, |
289 | }; | 289 | }; |
290 | 290 | ||
291 | static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu) | 291 | static struct amd_uncore *amd_uncore_alloc(unsigned int cpu) |
292 | { | 292 | { |
293 | return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL, | 293 | return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL, |
294 | cpu_to_node(cpu)); | 294 | cpu_to_node(cpu)); |
295 | } | 295 | } |
296 | 296 | ||
297 | static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu) | 297 | static void amd_uncore_cpu_up_prepare(unsigned int cpu) |
298 | { | 298 | { |
299 | struct amd_uncore *uncore; | 299 | struct amd_uncore *uncore; |
300 | 300 | ||
@@ -322,8 +322,8 @@ static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu) | |||
322 | } | 322 | } |
323 | 323 | ||
324 | static struct amd_uncore * | 324 | static struct amd_uncore * |
325 | __cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this, | 325 | amd_uncore_find_online_sibling(struct amd_uncore *this, |
326 | struct amd_uncore * __percpu *uncores) | 326 | struct amd_uncore * __percpu *uncores) |
327 | { | 327 | { |
328 | unsigned int cpu; | 328 | unsigned int cpu; |
329 | struct amd_uncore *that; | 329 | struct amd_uncore *that; |
@@ -348,7 +348,7 @@ __cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this, | |||
348 | return this; | 348 | return this; |
349 | } | 349 | } |
350 | 350 | ||
351 | static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu) | 351 | static void amd_uncore_cpu_starting(unsigned int cpu) |
352 | { | 352 | { |
353 | unsigned int eax, ebx, ecx, edx; | 353 | unsigned int eax, ebx, ecx, edx; |
354 | struct amd_uncore *uncore; | 354 | struct amd_uncore *uncore; |
@@ -376,8 +376,8 @@ static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu) | |||
376 | } | 376 | } |
377 | } | 377 | } |
378 | 378 | ||
379 | static void __cpuinit uncore_online(unsigned int cpu, | 379 | static void uncore_online(unsigned int cpu, |
380 | struct amd_uncore * __percpu *uncores) | 380 | struct amd_uncore * __percpu *uncores) |
381 | { | 381 | { |
382 | struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); | 382 | struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); |
383 | 383 | ||
@@ -388,7 +388,7 @@ static void __cpuinit uncore_online(unsigned int cpu, | |||
388 | cpumask_set_cpu(cpu, uncore->active_mask); | 388 | cpumask_set_cpu(cpu, uncore->active_mask); |
389 | } | 389 | } |
390 | 390 | ||
391 | static void __cpuinit amd_uncore_cpu_online(unsigned int cpu) | 391 | static void amd_uncore_cpu_online(unsigned int cpu) |
392 | { | 392 | { |
393 | if (amd_uncore_nb) | 393 | if (amd_uncore_nb) |
394 | uncore_online(cpu, amd_uncore_nb); | 394 | uncore_online(cpu, amd_uncore_nb); |
@@ -397,8 +397,8 @@ static void __cpuinit amd_uncore_cpu_online(unsigned int cpu) | |||
397 | uncore_online(cpu, amd_uncore_l2); | 397 | uncore_online(cpu, amd_uncore_l2); |
398 | } | 398 | } |
399 | 399 | ||
400 | static void __cpuinit uncore_down_prepare(unsigned int cpu, | 400 | static void uncore_down_prepare(unsigned int cpu, |
401 | struct amd_uncore * __percpu *uncores) | 401 | struct amd_uncore * __percpu *uncores) |
402 | { | 402 | { |
403 | unsigned int i; | 403 | unsigned int i; |
404 | struct amd_uncore *this = *per_cpu_ptr(uncores, cpu); | 404 | struct amd_uncore *this = *per_cpu_ptr(uncores, cpu); |
@@ -423,7 +423,7 @@ static void __cpuinit uncore_down_prepare(unsigned int cpu, | |||
423 | } | 423 | } |
424 | } | 424 | } |
425 | 425 | ||
426 | static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu) | 426 | static void amd_uncore_cpu_down_prepare(unsigned int cpu) |
427 | { | 427 | { |
428 | if (amd_uncore_nb) | 428 | if (amd_uncore_nb) |
429 | uncore_down_prepare(cpu, amd_uncore_nb); | 429 | uncore_down_prepare(cpu, amd_uncore_nb); |
@@ -432,8 +432,7 @@ static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu) | |||
432 | uncore_down_prepare(cpu, amd_uncore_l2); | 432 | uncore_down_prepare(cpu, amd_uncore_l2); |
433 | } | 433 | } |
434 | 434 | ||
435 | static void __cpuinit uncore_dead(unsigned int cpu, | 435 | static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores) |
436 | struct amd_uncore * __percpu *uncores) | ||
437 | { | 436 | { |
438 | struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); | 437 | struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu); |
439 | 438 | ||
@@ -445,7 +444,7 @@ static void __cpuinit uncore_dead(unsigned int cpu, | |||
445 | *per_cpu_ptr(amd_uncore_nb, cpu) = NULL; | 444 | *per_cpu_ptr(amd_uncore_nb, cpu) = NULL; |
446 | } | 445 | } |
447 | 446 | ||
448 | static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu) | 447 | static void amd_uncore_cpu_dead(unsigned int cpu) |
449 | { | 448 | { |
450 | if (amd_uncore_nb) | 449 | if (amd_uncore_nb) |
451 | uncore_dead(cpu, amd_uncore_nb); | 450 | uncore_dead(cpu, amd_uncore_nb); |
@@ -454,7 +453,7 @@ static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu) | |||
454 | uncore_dead(cpu, amd_uncore_l2); | 453 | uncore_dead(cpu, amd_uncore_l2); |
455 | } | 454 | } |
456 | 455 | ||
457 | static int __cpuinit | 456 | static int |
458 | amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, | 457 | amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, |
459 | void *hcpu) | 458 | void *hcpu) |
460 | { | 459 | { |
@@ -489,7 +488,7 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action, | |||
489 | return NOTIFY_OK; | 488 | return NOTIFY_OK; |
490 | } | 489 | } |
491 | 490 | ||
492 | static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = { | 491 | static struct notifier_block amd_uncore_cpu_notifier_block = { |
493 | .notifier_call = amd_uncore_cpu_notifier, | 492 | .notifier_call = amd_uncore_cpu_notifier, |
494 | .priority = CPU_PRI_PERF + 1, | 493 | .priority = CPU_PRI_PERF + 1, |
495 | }; | 494 | }; |
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c index 9dd99751ccf9..cad791dbde95 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c | |||
@@ -3297,7 +3297,7 @@ static void __init uncore_pci_exit(void) | |||
3297 | /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */ | 3297 | /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */ |
3298 | static LIST_HEAD(boxes_to_free); | 3298 | static LIST_HEAD(boxes_to_free); |
3299 | 3299 | ||
3300 | static void __cpuinit uncore_kfree_boxes(void) | 3300 | static void uncore_kfree_boxes(void) |
3301 | { | 3301 | { |
3302 | struct intel_uncore_box *box; | 3302 | struct intel_uncore_box *box; |
3303 | 3303 | ||
@@ -3309,7 +3309,7 @@ static void __cpuinit uncore_kfree_boxes(void) | |||
3309 | } | 3309 | } |
3310 | } | 3310 | } |
3311 | 3311 | ||
3312 | static void __cpuinit uncore_cpu_dying(int cpu) | 3312 | static void uncore_cpu_dying(int cpu) |
3313 | { | 3313 | { |
3314 | struct intel_uncore_type *type; | 3314 | struct intel_uncore_type *type; |
3315 | struct intel_uncore_pmu *pmu; | 3315 | struct intel_uncore_pmu *pmu; |
@@ -3328,7 +3328,7 @@ static void __cpuinit uncore_cpu_dying(int cpu) | |||
3328 | } | 3328 | } |
3329 | } | 3329 | } |
3330 | 3330 | ||
3331 | static int __cpuinit uncore_cpu_starting(int cpu) | 3331 | static int uncore_cpu_starting(int cpu) |
3332 | { | 3332 | { |
3333 | struct intel_uncore_type *type; | 3333 | struct intel_uncore_type *type; |
3334 | struct intel_uncore_pmu *pmu; | 3334 | struct intel_uncore_pmu *pmu; |
@@ -3371,7 +3371,7 @@ static int __cpuinit uncore_cpu_starting(int cpu) | |||
3371 | return 0; | 3371 | return 0; |
3372 | } | 3372 | } |
3373 | 3373 | ||
3374 | static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id) | 3374 | static int uncore_cpu_prepare(int cpu, int phys_id) |
3375 | { | 3375 | { |
3376 | struct intel_uncore_type *type; | 3376 | struct intel_uncore_type *type; |
3377 | struct intel_uncore_pmu *pmu; | 3377 | struct intel_uncore_pmu *pmu; |
@@ -3397,7 +3397,7 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id) | |||
3397 | return 0; | 3397 | return 0; |
3398 | } | 3398 | } |
3399 | 3399 | ||
3400 | static void __cpuinit | 3400 | static void |
3401 | uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu) | 3401 | uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu) |
3402 | { | 3402 | { |
3403 | struct intel_uncore_type *type; | 3403 | struct intel_uncore_type *type; |
@@ -3435,7 +3435,7 @@ uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_c | |||
3435 | } | 3435 | } |
3436 | } | 3436 | } |
3437 | 3437 | ||
3438 | static void __cpuinit uncore_event_exit_cpu(int cpu) | 3438 | static void uncore_event_exit_cpu(int cpu) |
3439 | { | 3439 | { |
3440 | int i, phys_id, target; | 3440 | int i, phys_id, target; |
3441 | 3441 | ||
@@ -3463,7 +3463,7 @@ static void __cpuinit uncore_event_exit_cpu(int cpu) | |||
3463 | uncore_change_context(pci_uncores, cpu, target); | 3463 | uncore_change_context(pci_uncores, cpu, target); |
3464 | } | 3464 | } |
3465 | 3465 | ||
3466 | static void __cpuinit uncore_event_init_cpu(int cpu) | 3466 | static void uncore_event_init_cpu(int cpu) |
3467 | { | 3467 | { |
3468 | int i, phys_id; | 3468 | int i, phys_id; |
3469 | 3469 | ||
@@ -3479,8 +3479,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu) | |||
3479 | uncore_change_context(pci_uncores, -1, cpu); | 3479 | uncore_change_context(pci_uncores, -1, cpu); |
3480 | } | 3480 | } |
3481 | 3481 | ||
3482 | static int | 3482 | static int uncore_cpu_notifier(struct notifier_block *self, |
3483 | __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) | 3483 | unsigned long action, void *hcpu) |
3484 | { | 3484 | { |
3485 | unsigned int cpu = (long)hcpu; | 3485 | unsigned int cpu = (long)hcpu; |
3486 | 3486 | ||
@@ -3520,7 +3520,7 @@ static int | |||
3520 | return NOTIFY_OK; | 3520 | return NOTIFY_OK; |
3521 | } | 3521 | } |
3522 | 3522 | ||
3523 | static struct notifier_block uncore_cpu_nb __cpuinitdata = { | 3523 | static struct notifier_block uncore_cpu_nb = { |
3524 | .notifier_call = uncore_cpu_notifier, | 3524 | .notifier_call = uncore_cpu_notifier, |
3525 | /* | 3525 | /* |
3526 | * to migrate uncore events, our notifier should be executed | 3526 | * to migrate uncore events, our notifier should be executed |
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c index feca286c2bb4..88db010845cb 100644 --- a/arch/x86/kernel/cpu/rdrand.c +++ b/arch/x86/kernel/cpu/rdrand.c | |||
@@ -52,7 +52,7 @@ static inline int rdrand_long(unsigned long *v) | |||
52 | */ | 52 | */ |
53 | #define RESEED_LOOP ((512*128)/sizeof(unsigned long)) | 53 | #define RESEED_LOOP ((512*128)/sizeof(unsigned long)) |
54 | 54 | ||
55 | void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c) | 55 | void x86_init_rdrand(struct cpuinfo_x86 *c) |
56 | { | 56 | { |
57 | #ifdef CONFIG_ARCH_RANDOM | 57 | #ifdef CONFIG_ARCH_RANDOM |
58 | unsigned long tmp; | 58 | unsigned long tmp; |
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c index d92b5dad15dd..f2cc63e9cf08 100644 --- a/arch/x86/kernel/cpu/scattered.c +++ b/arch/x86/kernel/cpu/scattered.c | |||
@@ -24,13 +24,13 @@ enum cpuid_regs { | |||
24 | CR_EBX | 24 | CR_EBX |
25 | }; | 25 | }; |
26 | 26 | ||
27 | void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c) | 27 | void init_scattered_cpuid_features(struct cpuinfo_x86 *c) |
28 | { | 28 | { |
29 | u32 max_level; | 29 | u32 max_level; |
30 | u32 regs[4]; | 30 | u32 regs[4]; |
31 | const struct cpuid_bit *cb; | 31 | const struct cpuid_bit *cb; |
32 | 32 | ||
33 | static const struct cpuid_bit __cpuinitconst cpuid_bits[] = { | 33 | static const struct cpuid_bit cpuid_bits[] = { |
34 | { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 }, | 34 | { X86_FEATURE_DTHERM, CR_EAX, 0, 0x00000006, 0 }, |
35 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, | 35 | { X86_FEATURE_IDA, CR_EAX, 1, 0x00000006, 0 }, |
36 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, | 36 | { X86_FEATURE_ARAT, CR_EAX, 2, 0x00000006, 0 }, |
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c index 4397e987a1cf..4c60eaf0571c 100644 --- a/arch/x86/kernel/cpu/topology.c +++ b/arch/x86/kernel/cpu/topology.c | |||
@@ -26,7 +26,7 @@ | |||
26 | * exists, use it for populating initial_apicid and cpu topology | 26 | * exists, use it for populating initial_apicid and cpu topology |
27 | * detection. | 27 | * detection. |
28 | */ | 28 | */ |
29 | void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c) | 29 | void detect_extended_topology(struct cpuinfo_x86 *c) |
30 | { | 30 | { |
31 | #ifdef CONFIG_SMP | 31 | #ifdef CONFIG_SMP |
32 | unsigned int eax, ebx, ecx, edx, sub_index; | 32 | unsigned int eax, ebx, ecx, edx, sub_index; |
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index 28000743bbb0..aa0430d69b90 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c | |||
@@ -5,7 +5,7 @@ | |||
5 | #include <asm/msr.h> | 5 | #include <asm/msr.h> |
6 | #include "cpu.h" | 6 | #include "cpu.h" |
7 | 7 | ||
8 | static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c) | 8 | static void early_init_transmeta(struct cpuinfo_x86 *c) |
9 | { | 9 | { |
10 | u32 xlvl; | 10 | u32 xlvl; |
11 | 11 | ||
@@ -17,7 +17,7 @@ static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c) | |||
17 | } | 17 | } |
18 | } | 18 | } |
19 | 19 | ||
20 | static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | 20 | static void init_transmeta(struct cpuinfo_x86 *c) |
21 | { | 21 | { |
22 | unsigned int cap_mask, uk, max, dummy; | 22 | unsigned int cap_mask, uk, max, dummy; |
23 | unsigned int cms_rev1, cms_rev2; | 23 | unsigned int cms_rev1, cms_rev2; |
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c) | |||
98 | #endif | 98 | #endif |
99 | } | 99 | } |
100 | 100 | ||
101 | static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = { | 101 | static const struct cpu_dev transmeta_cpu_dev = { |
102 | .c_vendor = "Transmeta", | 102 | .c_vendor = "Transmeta", |
103 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, | 103 | .c_ident = { "GenuineTMx86", "TransmetaCPU" }, |
104 | .c_early_init = early_init_transmeta, | 104 | .c_early_init = early_init_transmeta, |
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c index fd2c37bf7acb..202759a14121 100644 --- a/arch/x86/kernel/cpu/umc.c +++ b/arch/x86/kernel/cpu/umc.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * so no special init takes place. | 8 | * so no special init takes place. |
9 | */ | 9 | */ |
10 | 10 | ||
11 | static const struct cpu_dev __cpuinitconst umc_cpu_dev = { | 11 | static const struct cpu_dev umc_cpu_dev = { |
12 | .c_vendor = "UMC", | 12 | .c_vendor = "UMC", |
13 | .c_ident = { "UMC UMC UMC" }, | 13 | .c_ident = { "UMC UMC UMC" }, |
14 | .c_models = { | 14 | .c_models = { |
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 03a36321ec54..7076878404ec 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c | |||
@@ -122,7 +122,7 @@ static bool __init vmware_platform(void) | |||
122 | * so that the kernel could just trust the hypervisor with providing a | 122 | * so that the kernel could just trust the hypervisor with providing a |
123 | * reliable virtual TSC that is suitable for timekeeping. | 123 | * reliable virtual TSC that is suitable for timekeeping. |
124 | */ | 124 | */ |
125 | static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c) | 125 | static void vmware_set_cpu_features(struct cpuinfo_x86 *c) |
126 | { | 126 | { |
127 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); | 127 | set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); |
128 | set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); | 128 | set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE); |
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c index 1e4dbcfe6d31..7d9481c743f8 100644 --- a/arch/x86/kernel/cpuid.c +++ b/arch/x86/kernel/cpuid.c | |||
@@ -137,7 +137,7 @@ static const struct file_operations cpuid_fops = { | |||
137 | .open = cpuid_open, | 137 | .open = cpuid_open, |
138 | }; | 138 | }; |
139 | 139 | ||
140 | static __cpuinit int cpuid_device_create(int cpu) | 140 | static int cpuid_device_create(int cpu) |
141 | { | 141 | { |
142 | struct device *dev; | 142 | struct device *dev; |
143 | 143 | ||
@@ -151,9 +151,8 @@ static void cpuid_device_destroy(int cpu) | |||
151 | device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); | 151 | device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); |
152 | } | 152 | } |
153 | 153 | ||
154 | static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb, | 154 | static int cpuid_class_cpu_callback(struct notifier_block *nfb, |
155 | unsigned long action, | 155 | unsigned long action, void *hcpu) |
156 | void *hcpu) | ||
157 | { | 156 | { |
158 | unsigned int cpu = (unsigned long)hcpu; | 157 | unsigned int cpu = (unsigned long)hcpu; |
159 | int err = 0; | 158 | int err = 0; |
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c index 4934890e4db2..69eb2fa25494 100644 --- a/arch/x86/kernel/devicetree.c +++ b/arch/x86/kernel/devicetree.c | |||
@@ -133,7 +133,7 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev) | |||
133 | { | 133 | { |
134 | } | 134 | } |
135 | 135 | ||
136 | void __cpuinit x86_of_pci_init(void) | 136 | void x86_of_pci_init(void) |
137 | { | 137 | { |
138 | pcibios_enable_irq = x86_of_pci_irq_enable; | 138 | pcibios_enable_irq = x86_of_pci_irq_enable; |
139 | pcibios_disable_irq = x86_of_pci_irq_disable; | 139 | pcibios_disable_irq = x86_of_pci_irq_disable; |
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S index e65ddc62e113..5dd87a89f011 100644 --- a/arch/x86/kernel/head_32.S +++ b/arch/x86/kernel/head_32.S | |||
@@ -292,7 +292,6 @@ ENDPROC(start_cpu0) | |||
292 | * If cpu hotplug is not supported then this code can go in init section | 292 | * If cpu hotplug is not supported then this code can go in init section |
293 | * which will be freed later | 293 | * which will be freed later |
294 | */ | 294 | */ |
295 | __CPUINIT | ||
296 | ENTRY(startup_32_smp) | 295 | ENTRY(startup_32_smp) |
297 | cld | 296 | cld |
298 | movl $(__BOOT_DS),%eax | 297 | movl $(__BOOT_DS),%eax |
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index b627746f6b1a..202d24f0f7e7 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c | |||
@@ -108,9 +108,9 @@ EXPORT_SYMBOL(unlazy_fpu); | |||
108 | unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; | 108 | unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu; |
109 | unsigned int xstate_size; | 109 | unsigned int xstate_size; |
110 | EXPORT_SYMBOL_GPL(xstate_size); | 110 | EXPORT_SYMBOL_GPL(xstate_size); |
111 | static struct i387_fxsave_struct fx_scratch __cpuinitdata; | 111 | static struct i387_fxsave_struct fx_scratch; |
112 | 112 | ||
113 | static void __cpuinit mxcsr_feature_mask_init(void) | 113 | static void mxcsr_feature_mask_init(void) |
114 | { | 114 | { |
115 | unsigned long mask = 0; | 115 | unsigned long mask = 0; |
116 | 116 | ||
@@ -124,7 +124,7 @@ static void __cpuinit mxcsr_feature_mask_init(void) | |||
124 | mxcsr_feature_mask &= mask; | 124 | mxcsr_feature_mask &= mask; |
125 | } | 125 | } |
126 | 126 | ||
127 | static void __cpuinit init_thread_xstate(void) | 127 | static void init_thread_xstate(void) |
128 | { | 128 | { |
129 | /* | 129 | /* |
130 | * Note that xstate_size might be overwriten later during | 130 | * Note that xstate_size might be overwriten later during |
@@ -153,7 +153,7 @@ static void __cpuinit init_thread_xstate(void) | |||
153 | * into all processes. | 153 | * into all processes. |
154 | */ | 154 | */ |
155 | 155 | ||
156 | void __cpuinit fpu_init(void) | 156 | void fpu_init(void) |
157 | { | 157 | { |
158 | unsigned long cr0; | 158 | unsigned long cr0; |
159 | unsigned long cr4_mask = 0; | 159 | unsigned long cr4_mask = 0; |
@@ -608,7 +608,7 @@ static int __init no_387(char *s) | |||
608 | 608 | ||
609 | __setup("no387", no_387); | 609 | __setup("no387", no_387); |
610 | 610 | ||
611 | void __cpuinit fpu_detect(struct cpuinfo_x86 *c) | 611 | void fpu_detect(struct cpuinfo_x86 *c) |
612 | { | 612 | { |
613 | unsigned long cr0; | 613 | unsigned long cr0; |
614 | u16 fsw, fcw; | 614 | u16 fsw, fcw; |
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c index 344faf8d0d62..4186755f1d7c 100644 --- a/arch/x86/kernel/irq_32.c +++ b/arch/x86/kernel/irq_32.c | |||
@@ -119,7 +119,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) | |||
119 | /* | 119 | /* |
120 | * allocate per-cpu stacks for hardirq and for softirq processing | 120 | * allocate per-cpu stacks for hardirq and for softirq processing |
121 | */ | 121 | */ |
122 | void __cpuinit irq_ctx_init(int cpu) | 122 | void irq_ctx_init(int cpu) |
123 | { | 123 | { |
124 | union irq_ctx *irqctx; | 124 | union irq_ctx *irqctx; |
125 | 125 | ||
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index cd6d9a5a42f6..a96d32cc55b8 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c | |||
@@ -320,7 +320,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val) | |||
320 | apic_write(APIC_EOI, APIC_EOI_ACK); | 320 | apic_write(APIC_EOI, APIC_EOI_ACK); |
321 | } | 321 | } |
322 | 322 | ||
323 | void __cpuinit kvm_guest_cpu_init(void) | 323 | void kvm_guest_cpu_init(void) |
324 | { | 324 | { |
325 | if (!kvm_para_available()) | 325 | if (!kvm_para_available()) |
326 | return; | 326 | return; |
@@ -421,7 +421,7 @@ static void __init kvm_smp_prepare_boot_cpu(void) | |||
421 | native_smp_prepare_boot_cpu(); | 421 | native_smp_prepare_boot_cpu(); |
422 | } | 422 | } |
423 | 423 | ||
424 | static void __cpuinit kvm_guest_cpu_online(void *dummy) | 424 | static void kvm_guest_cpu_online(void *dummy) |
425 | { | 425 | { |
426 | kvm_guest_cpu_init(); | 426 | kvm_guest_cpu_init(); |
427 | } | 427 | } |
@@ -435,8 +435,8 @@ static void kvm_guest_cpu_offline(void *dummy) | |||
435 | apf_task_wake_all(); | 435 | apf_task_wake_all(); |
436 | } | 436 | } |
437 | 437 | ||
438 | static int __cpuinit kvm_cpu_notify(struct notifier_block *self, | 438 | static int kvm_cpu_notify(struct notifier_block *self, unsigned long action, |
439 | unsigned long action, void *hcpu) | 439 | void *hcpu) |
440 | { | 440 | { |
441 | int cpu = (unsigned long)hcpu; | 441 | int cpu = (unsigned long)hcpu; |
442 | switch (action) { | 442 | switch (action) { |
@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self, | |||
455 | return NOTIFY_OK; | 455 | return NOTIFY_OK; |
456 | } | 456 | } |
457 | 457 | ||
458 | static struct notifier_block __cpuinitdata kvm_cpu_notifier = { | 458 | static struct notifier_block kvm_cpu_notifier = { |
459 | .notifier_call = kvm_cpu_notify, | 459 | .notifier_call = kvm_cpu_notify, |
460 | }; | 460 | }; |
461 | #endif | 461 | #endif |
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c index 1f354f4b602b..1570e0741344 100644 --- a/arch/x86/kernel/kvmclock.c +++ b/arch/x86/kernel/kvmclock.c | |||
@@ -182,7 +182,7 @@ static void kvm_restore_sched_clock_state(void) | |||
182 | } | 182 | } |
183 | 183 | ||
184 | #ifdef CONFIG_X86_LOCAL_APIC | 184 | #ifdef CONFIG_X86_LOCAL_APIC |
185 | static void __cpuinit kvm_setup_secondary_clock(void) | 185 | static void kvm_setup_secondary_clock(void) |
186 | { | 186 | { |
187 | /* | 187 | /* |
188 | * Now that the first cpu already had this clocksource initialized, | 188 | * Now that the first cpu already had this clocksource initialized, |
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c index 1ac6e9aee766..1d14ffee5749 100644 --- a/arch/x86/kernel/microcode_amd_early.c +++ b/arch/x86/kernel/microcode_amd_early.c | |||
@@ -82,7 +82,7 @@ static struct cpio_data __init find_ucode_in_initrd(void) | |||
82 | * load_microcode_amd() to save equivalent cpu table and microcode patches in | 82 | * load_microcode_amd() to save equivalent cpu table and microcode patches in |
83 | * kernel heap memory. | 83 | * kernel heap memory. |
84 | */ | 84 | */ |
85 | static void __cpuinit apply_ucode_in_initrd(void *ucode, size_t size) | 85 | static void apply_ucode_in_initrd(void *ucode, size_t size) |
86 | { | 86 | { |
87 | struct equiv_cpu_entry *eq; | 87 | struct equiv_cpu_entry *eq; |
88 | u32 *header; | 88 | u32 *header; |
@@ -206,7 +206,7 @@ u8 amd_bsp_mpb[MPB_MAX_SIZE]; | |||
206 | * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which | 206 | * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which |
207 | * is used upon resume from suspend. | 207 | * is used upon resume from suspend. |
208 | */ | 208 | */ |
209 | void __cpuinit load_ucode_amd_ap(void) | 209 | void load_ucode_amd_ap(void) |
210 | { | 210 | { |
211 | struct microcode_amd *mc; | 211 | struct microcode_amd *mc; |
212 | unsigned long *initrd; | 212 | unsigned long *initrd; |
@@ -238,7 +238,7 @@ static void __init collect_cpu_sig_on_bsp(void *arg) | |||
238 | uci->cpu_sig.sig = cpuid_eax(0x00000001); | 238 | uci->cpu_sig.sig = cpuid_eax(0x00000001); |
239 | } | 239 | } |
240 | #else | 240 | #else |
241 | static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c, | 241 | static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c, |
242 | struct ucode_cpu_info *uci) | 242 | struct ucode_cpu_info *uci) |
243 | { | 243 | { |
244 | u32 rev, eax; | 244 | u32 rev, eax; |
@@ -252,7 +252,7 @@ static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c, | |||
252 | c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); | 252 | c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff); |
253 | } | 253 | } |
254 | 254 | ||
255 | void __cpuinit load_ucode_amd_ap(void) | 255 | void load_ucode_amd_ap(void) |
256 | { | 256 | { |
257 | unsigned int cpu = smp_processor_id(); | 257 | unsigned int cpu = smp_processor_id(); |
258 | 258 | ||
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c index 22db92bbdf1a..15c987698b0f 100644 --- a/arch/x86/kernel/microcode_core.c +++ b/arch/x86/kernel/microcode_core.c | |||
@@ -468,7 +468,7 @@ static struct syscore_ops mc_syscore_ops = { | |||
468 | .resume = mc_bp_resume, | 468 | .resume = mc_bp_resume, |
469 | }; | 469 | }; |
470 | 470 | ||
471 | static __cpuinit int | 471 | static int |
472 | mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) | 472 | mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu) |
473 | { | 473 | { |
474 | unsigned int cpu = (unsigned long)hcpu; | 474 | unsigned int cpu = (unsigned long)hcpu; |
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c index 86119f63db0c..be7f8514f577 100644 --- a/arch/x86/kernel/microcode_core_early.c +++ b/arch/x86/kernel/microcode_core_early.c | |||
@@ -41,7 +41,7 @@ | |||
41 | * | 41 | * |
42 | * x86_vendor() gets vendor information directly through cpuid. | 42 | * x86_vendor() gets vendor information directly through cpuid. |
43 | */ | 43 | */ |
44 | static int __cpuinit x86_vendor(void) | 44 | static int x86_vendor(void) |
45 | { | 45 | { |
46 | u32 eax = 0x00000000; | 46 | u32 eax = 0x00000000; |
47 | u32 ebx, ecx = 0, edx; | 47 | u32 ebx, ecx = 0, edx; |
@@ -57,7 +57,7 @@ static int __cpuinit x86_vendor(void) | |||
57 | return X86_VENDOR_UNKNOWN; | 57 | return X86_VENDOR_UNKNOWN; |
58 | } | 58 | } |
59 | 59 | ||
60 | static int __cpuinit x86_family(void) | 60 | static int x86_family(void) |
61 | { | 61 | { |
62 | u32 eax = 0x00000001; | 62 | u32 eax = 0x00000001; |
63 | u32 ebx, ecx = 0, edx; | 63 | u32 ebx, ecx = 0, edx; |
@@ -96,7 +96,7 @@ void __init load_ucode_bsp(void) | |||
96 | } | 96 | } |
97 | } | 97 | } |
98 | 98 | ||
99 | void __cpuinit load_ucode_ap(void) | 99 | void load_ucode_ap(void) |
100 | { | 100 | { |
101 | int vendor, x86; | 101 | int vendor, x86; |
102 | 102 | ||
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c index dabef95506f3..1575deb2e636 100644 --- a/arch/x86/kernel/microcode_intel_early.c +++ b/arch/x86/kernel/microcode_intel_early.c | |||
@@ -34,7 +34,7 @@ struct mc_saved_data { | |||
34 | struct microcode_intel **mc_saved; | 34 | struct microcode_intel **mc_saved; |
35 | } mc_saved_data; | 35 | } mc_saved_data; |
36 | 36 | ||
37 | static enum ucode_state __cpuinit | 37 | static enum ucode_state |
38 | generic_load_microcode_early(struct microcode_intel **mc_saved_p, | 38 | generic_load_microcode_early(struct microcode_intel **mc_saved_p, |
39 | unsigned int mc_saved_count, | 39 | unsigned int mc_saved_count, |
40 | struct ucode_cpu_info *uci) | 40 | struct ucode_cpu_info *uci) |
@@ -69,7 +69,7 @@ out: | |||
69 | return state; | 69 | return state; |
70 | } | 70 | } |
71 | 71 | ||
72 | static void __cpuinit | 72 | static void |
73 | microcode_pointer(struct microcode_intel **mc_saved, | 73 | microcode_pointer(struct microcode_intel **mc_saved, |
74 | unsigned long *mc_saved_in_initrd, | 74 | unsigned long *mc_saved_in_initrd, |
75 | unsigned long initrd_start, int mc_saved_count) | 75 | unsigned long initrd_start, int mc_saved_count) |
@@ -82,7 +82,7 @@ microcode_pointer(struct microcode_intel **mc_saved, | |||
82 | } | 82 | } |
83 | 83 | ||
84 | #ifdef CONFIG_X86_32 | 84 | #ifdef CONFIG_X86_32 |
85 | static void __cpuinit | 85 | static void |
86 | microcode_phys(struct microcode_intel **mc_saved_tmp, | 86 | microcode_phys(struct microcode_intel **mc_saved_tmp, |
87 | struct mc_saved_data *mc_saved_data) | 87 | struct mc_saved_data *mc_saved_data) |
88 | { | 88 | { |
@@ -101,7 +101,7 @@ microcode_phys(struct microcode_intel **mc_saved_tmp, | |||
101 | } | 101 | } |
102 | #endif | 102 | #endif |
103 | 103 | ||
104 | static enum ucode_state __cpuinit | 104 | static enum ucode_state |
105 | load_microcode(struct mc_saved_data *mc_saved_data, | 105 | load_microcode(struct mc_saved_data *mc_saved_data, |
106 | unsigned long *mc_saved_in_initrd, | 106 | unsigned long *mc_saved_in_initrd, |
107 | unsigned long initrd_start, | 107 | unsigned long initrd_start, |
@@ -375,7 +375,7 @@ do { \ | |||
375 | #define native_wrmsr(msr, low, high) \ | 375 | #define native_wrmsr(msr, low, high) \ |
376 | native_write_msr(msr, low, high); | 376 | native_write_msr(msr, low, high); |
377 | 377 | ||
378 | static int __cpuinit collect_cpu_info_early(struct ucode_cpu_info *uci) | 378 | static int collect_cpu_info_early(struct ucode_cpu_info *uci) |
379 | { | 379 | { |
380 | unsigned int val[2]; | 380 | unsigned int val[2]; |
381 | u8 x86, x86_model; | 381 | u8 x86, x86_model; |
@@ -584,7 +584,7 @@ scan_microcode(unsigned long start, unsigned long end, | |||
584 | /* | 584 | /* |
585 | * Print ucode update info. | 585 | * Print ucode update info. |
586 | */ | 586 | */ |
587 | static void __cpuinit | 587 | static void |
588 | print_ucode_info(struct ucode_cpu_info *uci, unsigned int date) | 588 | print_ucode_info(struct ucode_cpu_info *uci, unsigned int date) |
589 | { | 589 | { |
590 | int cpu = smp_processor_id(); | 590 | int cpu = smp_processor_id(); |
@@ -605,7 +605,7 @@ static int current_mc_date; | |||
605 | /* | 605 | /* |
606 | * Print early updated ucode info after printk works. This is delayed info dump. | 606 | * Print early updated ucode info after printk works. This is delayed info dump. |
607 | */ | 607 | */ |
608 | void __cpuinit show_ucode_info_early(void) | 608 | void show_ucode_info_early(void) |
609 | { | 609 | { |
610 | struct ucode_cpu_info uci; | 610 | struct ucode_cpu_info uci; |
611 | 611 | ||
@@ -621,7 +621,7 @@ void __cpuinit show_ucode_info_early(void) | |||
621 | * mc_saved_data.mc_saved and delay printing microcode info in | 621 | * mc_saved_data.mc_saved and delay printing microcode info in |
622 | * show_ucode_info_early() until printk() works. | 622 | * show_ucode_info_early() until printk() works. |
623 | */ | 623 | */ |
624 | static void __cpuinit print_ucode(struct ucode_cpu_info *uci) | 624 | static void print_ucode(struct ucode_cpu_info *uci) |
625 | { | 625 | { |
626 | struct microcode_intel *mc_intel; | 626 | struct microcode_intel *mc_intel; |
627 | int *delay_ucode_info_p; | 627 | int *delay_ucode_info_p; |
@@ -643,12 +643,12 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci) | |||
643 | * Flush global tlb. We only do this in x86_64 where paging has been enabled | 643 | * Flush global tlb. We only do this in x86_64 where paging has been enabled |
644 | * already and PGE should be enabled as well. | 644 | * already and PGE should be enabled as well. |
645 | */ | 645 | */ |
646 | static inline void __cpuinit flush_tlb_early(void) | 646 | static inline void flush_tlb_early(void) |
647 | { | 647 | { |
648 | __native_flush_tlb_global_irq_disabled(); | 648 | __native_flush_tlb_global_irq_disabled(); |
649 | } | 649 | } |
650 | 650 | ||
651 | static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci) | 651 | static inline void print_ucode(struct ucode_cpu_info *uci) |
652 | { | 652 | { |
653 | struct microcode_intel *mc_intel; | 653 | struct microcode_intel *mc_intel; |
654 | 654 | ||
@@ -660,8 +660,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci) | |||
660 | } | 660 | } |
661 | #endif | 661 | #endif |
662 | 662 | ||
663 | static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data, | 663 | static int apply_microcode_early(struct mc_saved_data *mc_saved_data, |
664 | struct ucode_cpu_info *uci) | 664 | struct ucode_cpu_info *uci) |
665 | { | 665 | { |
666 | struct microcode_intel *mc_intel; | 666 | struct microcode_intel *mc_intel; |
667 | unsigned int val[2]; | 667 | unsigned int val[2]; |
@@ -763,7 +763,7 @@ load_ucode_intel_bsp(void) | |||
763 | #endif | 763 | #endif |
764 | } | 764 | } |
765 | 765 | ||
766 | void __cpuinit load_ucode_intel_ap(void) | 766 | void load_ucode_intel_ap(void) |
767 | { | 767 | { |
768 | struct mc_saved_data *mc_saved_data_p; | 768 | struct mc_saved_data *mc_saved_data_p; |
769 | struct ucode_cpu_info uci; | 769 | struct ucode_cpu_info uci; |
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c index ac861b8348e2..f4c886d9165c 100644 --- a/arch/x86/kernel/mmconf-fam10h_64.c +++ b/arch/x86/kernel/mmconf-fam10h_64.c | |||
@@ -24,14 +24,14 @@ struct pci_hostbridge_probe { | |||
24 | u32 device; | 24 | u32 device; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | static u64 __cpuinitdata fam10h_pci_mmconf_base; | 27 | static u64 fam10h_pci_mmconf_base; |
28 | 28 | ||
29 | static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = { | 29 | static struct pci_hostbridge_probe pci_probes[] = { |
30 | { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, | 30 | { 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 }, |
31 | { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 }, | 31 | { 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 }, |
32 | }; | 32 | }; |
33 | 33 | ||
34 | static int __cpuinit cmp_range(const void *x1, const void *x2) | 34 | static int cmp_range(const void *x1, const void *x2) |
35 | { | 35 | { |
36 | const struct range *r1 = x1; | 36 | const struct range *r1 = x1; |
37 | const struct range *r2 = x2; | 37 | const struct range *r2 = x2; |
@@ -49,7 +49,7 @@ static int __cpuinit cmp_range(const void *x1, const void *x2) | |||
49 | /* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */ | 49 | /* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */ |
50 | #define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) | 50 | #define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32) |
51 | #define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40)) | 51 | #define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40)) |
52 | static void __cpuinit get_fam10h_pci_mmconf_base(void) | 52 | static void get_fam10h_pci_mmconf_base(void) |
53 | { | 53 | { |
54 | int i; | 54 | int i; |
55 | unsigned bus; | 55 | unsigned bus; |
@@ -166,7 +166,7 @@ out: | |||
166 | fam10h_pci_mmconf_base = base; | 166 | fam10h_pci_mmconf_base = base; |
167 | } | 167 | } |
168 | 168 | ||
169 | void __cpuinit fam10h_check_enable_mmcfg(void) | 169 | void fam10h_check_enable_mmcfg(void) |
170 | { | 170 | { |
171 | u64 val; | 171 | u64 val; |
172 | u32 address; | 172 | u32 address; |
@@ -230,7 +230,7 @@ static const struct dmi_system_id __initconst mmconf_dmi_table[] = { | |||
230 | {} | 230 | {} |
231 | }; | 231 | }; |
232 | 232 | ||
233 | /* Called from a __cpuinit function, but only on the BSP. */ | 233 | /* Called from a non __init function, but only on the BSP. */ |
234 | void __ref check_enable_amd_mmconf_dmi(void) | 234 | void __ref check_enable_amd_mmconf_dmi(void) |
235 | { | 235 | { |
236 | dmi_check_system(mmconf_dmi_table); | 236 | dmi_check_system(mmconf_dmi_table); |
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c index ce130493b802..88458faea2f8 100644 --- a/arch/x86/kernel/msr.c +++ b/arch/x86/kernel/msr.c | |||
@@ -200,7 +200,7 @@ static const struct file_operations msr_fops = { | |||
200 | .compat_ioctl = msr_ioctl, | 200 | .compat_ioctl = msr_ioctl, |
201 | }; | 201 | }; |
202 | 202 | ||
203 | static int __cpuinit msr_device_create(int cpu) | 203 | static int msr_device_create(int cpu) |
204 | { | 204 | { |
205 | struct device *dev; | 205 | struct device *dev; |
206 | 206 | ||
@@ -214,8 +214,8 @@ static void msr_device_destroy(int cpu) | |||
214 | device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); | 214 | device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu)); |
215 | } | 215 | } |
216 | 216 | ||
217 | static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb, | 217 | static int msr_class_cpu_callback(struct notifier_block *nfb, |
218 | unsigned long action, void *hcpu) | 218 | unsigned long action, void *hcpu) |
219 | { | 219 | { |
220 | unsigned int cpu = (unsigned long)hcpu; | 220 | unsigned int cpu = (unsigned long)hcpu; |
221 | int err = 0; | 221 | int err = 0; |
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c index 81a5f5e8f142..83369e5a1d27 100644 --- a/arch/x86/kernel/process.c +++ b/arch/x86/kernel/process.c | |||
@@ -398,7 +398,7 @@ static void amd_e400_idle(void) | |||
398 | default_idle(); | 398 | default_idle(); |
399 | } | 399 | } |
400 | 400 | ||
401 | void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c) | 401 | void select_idle_routine(const struct cpuinfo_x86 *c) |
402 | { | 402 | { |
403 | #ifdef CONFIG_SMP | 403 | #ifdef CONFIG_SMP |
404 | if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1) | 404 | if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1) |
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index e68709da8251..f8ec57815c05 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c | |||
@@ -170,7 +170,7 @@ static struct resource bss_resource = { | |||
170 | 170 | ||
171 | #ifdef CONFIG_X86_32 | 171 | #ifdef CONFIG_X86_32 |
172 | /* cpu data as detected by the assembly code in head.S */ | 172 | /* cpu data as detected by the assembly code in head.S */ |
173 | struct cpuinfo_x86 new_cpu_data __cpuinitdata = { | 173 | struct cpuinfo_x86 new_cpu_data = { |
174 | .wp_works_ok = -1, | 174 | .wp_works_ok = -1, |
175 | }; | 175 | }; |
176 | /* common cpu data for all cpus */ | 176 | /* common cpu data for all cpus */ |
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c index bfd348e99369..aecc98a93d1b 100644 --- a/arch/x86/kernel/smpboot.c +++ b/arch/x86/kernel/smpboot.c | |||
@@ -130,7 +130,7 @@ atomic_t init_deasserted; | |||
130 | * Report back to the Boot Processor during boot time or to the caller processor | 130 | * Report back to the Boot Processor during boot time or to the caller processor |
131 | * during CPU online. | 131 | * during CPU online. |
132 | */ | 132 | */ |
133 | static void __cpuinit smp_callin(void) | 133 | static void smp_callin(void) |
134 | { | 134 | { |
135 | int cpuid, phys_id; | 135 | int cpuid, phys_id; |
136 | unsigned long timeout; | 136 | unsigned long timeout; |
@@ -237,7 +237,7 @@ static int enable_start_cpu0; | |||
237 | /* | 237 | /* |
238 | * Activate a secondary processor. | 238 | * Activate a secondary processor. |
239 | */ | 239 | */ |
240 | notrace static void __cpuinit start_secondary(void *unused) | 240 | static void notrace start_secondary(void *unused) |
241 | { | 241 | { |
242 | /* | 242 | /* |
243 | * Don't put *anything* before cpu_init(), SMP booting is too | 243 | * Don't put *anything* before cpu_init(), SMP booting is too |
@@ -300,7 +300,7 @@ void __init smp_store_boot_cpu_info(void) | |||
300 | * The bootstrap kernel entry code has set these up. Save them for | 300 | * The bootstrap kernel entry code has set these up. Save them for |
301 | * a given CPU | 301 | * a given CPU |
302 | */ | 302 | */ |
303 | void __cpuinit smp_store_cpu_info(int id) | 303 | void smp_store_cpu_info(int id) |
304 | { | 304 | { |
305 | struct cpuinfo_x86 *c = &cpu_data(id); | 305 | struct cpuinfo_x86 *c = &cpu_data(id); |
306 | 306 | ||
@@ -313,7 +313,7 @@ void __cpuinit smp_store_cpu_info(int id) | |||
313 | identify_secondary_cpu(c); | 313 | identify_secondary_cpu(c); |
314 | } | 314 | } |
315 | 315 | ||
316 | static bool __cpuinit | 316 | static bool |
317 | topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) | 317 | topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) |
318 | { | 318 | { |
319 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; | 319 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
@@ -330,7 +330,7 @@ do { \ | |||
330 | cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \ | 330 | cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \ |
331 | } while (0) | 331 | } while (0) |
332 | 332 | ||
333 | static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | 333 | static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
334 | { | 334 | { |
335 | if (cpu_has_topoext) { | 335 | if (cpu_has_topoext) { |
336 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; | 336 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
@@ -348,7 +348,7 @@ static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
348 | return false; | 348 | return false; |
349 | } | 349 | } |
350 | 350 | ||
351 | static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | 351 | static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
352 | { | 352 | { |
353 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; | 353 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
354 | 354 | ||
@@ -359,7 +359,7 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
359 | return false; | 359 | return false; |
360 | } | 360 | } |
361 | 361 | ||
362 | static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | 362 | static bool match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
363 | { | 363 | { |
364 | if (c->phys_proc_id == o->phys_proc_id) { | 364 | if (c->phys_proc_id == o->phys_proc_id) { |
365 | if (cpu_has(c, X86_FEATURE_AMD_DCM)) | 365 | if (cpu_has(c, X86_FEATURE_AMD_DCM)) |
@@ -370,7 +370,7 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) | |||
370 | return false; | 370 | return false; |
371 | } | 371 | } |
372 | 372 | ||
373 | void __cpuinit set_cpu_sibling_map(int cpu) | 373 | void set_cpu_sibling_map(int cpu) |
374 | { | 374 | { |
375 | bool has_smt = smp_num_siblings > 1; | 375 | bool has_smt = smp_num_siblings > 1; |
376 | bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; | 376 | bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; |
@@ -499,7 +499,7 @@ void __inquire_remote_apic(int apicid) | |||
499 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this | 499 | * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this |
500 | * won't ... remember to clear down the APIC, etc later. | 500 | * won't ... remember to clear down the APIC, etc later. |
501 | */ | 501 | */ |
502 | int __cpuinit | 502 | int |
503 | wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip) | 503 | wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip) |
504 | { | 504 | { |
505 | unsigned long send_status, accept_status = 0; | 505 | unsigned long send_status, accept_status = 0; |
@@ -533,7 +533,7 @@ wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip) | |||
533 | return (send_status | accept_status); | 533 | return (send_status | accept_status); |
534 | } | 534 | } |
535 | 535 | ||
536 | static int __cpuinit | 536 | static int |
537 | wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) | 537 | wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) |
538 | { | 538 | { |
539 | unsigned long send_status, accept_status = 0; | 539 | unsigned long send_status, accept_status = 0; |
@@ -649,7 +649,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip) | |||
649 | } | 649 | } |
650 | 650 | ||
651 | /* reduce the number of lines printed when booting a large cpu count system */ | 651 | /* reduce the number of lines printed when booting a large cpu count system */ |
652 | static void __cpuinit announce_cpu(int cpu, int apicid) | 652 | static void announce_cpu(int cpu, int apicid) |
653 | { | 653 | { |
654 | static int current_node = -1; | 654 | static int current_node = -1; |
655 | int node = early_cpu_to_node(cpu); | 655 | int node = early_cpu_to_node(cpu); |
@@ -691,7 +691,7 @@ static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs) | |||
691 | * We'll change this code in the future to wake up hard offlined CPU0 if | 691 | * We'll change this code in the future to wake up hard offlined CPU0 if |
692 | * real platform and request are available. | 692 | * real platform and request are available. |
693 | */ | 693 | */ |
694 | static int __cpuinit | 694 | static int |
695 | wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid, | 695 | wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid, |
696 | int *cpu0_nmi_registered) | 696 | int *cpu0_nmi_registered) |
697 | { | 697 | { |
@@ -731,7 +731,7 @@ wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid, | |||
731 | * Returns zero if CPU booted OK, else error code from | 731 | * Returns zero if CPU booted OK, else error code from |
732 | * ->wakeup_secondary_cpu. | 732 | * ->wakeup_secondary_cpu. |
733 | */ | 733 | */ |
734 | static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) | 734 | static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle) |
735 | { | 735 | { |
736 | volatile u32 *trampoline_status = | 736 | volatile u32 *trampoline_status = |
737 | (volatile u32 *) __va(real_mode_header->trampoline_status); | 737 | (volatile u32 *) __va(real_mode_header->trampoline_status); |
@@ -872,7 +872,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) | |||
872 | return boot_error; | 872 | return boot_error; |
873 | } | 873 | } |
874 | 874 | ||
875 | int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle) | 875 | int native_cpu_up(unsigned int cpu, struct task_struct *tidle) |
876 | { | 876 | { |
877 | int apicid = apic->cpu_present_to_apicid(cpu); | 877 | int apicid = apic->cpu_present_to_apicid(cpu); |
878 | unsigned long flags; | 878 | unsigned long flags; |
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c index 3ff42d2f046d..addf7b58f4e8 100644 --- a/arch/x86/kernel/tboot.c +++ b/arch/x86/kernel/tboot.c | |||
@@ -320,8 +320,8 @@ static int tboot_wait_for_aps(int num_aps) | |||
320 | return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps); | 320 | return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps); |
321 | } | 321 | } |
322 | 322 | ||
323 | static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb, | 323 | static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action, |
324 | unsigned long action, void *hcpu) | 324 | void *hcpu) |
325 | { | 325 | { |
326 | switch (action) { | 326 | switch (action) { |
327 | case CPU_DYING: | 327 | case CPU_DYING: |
@@ -334,7 +334,7 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb, | |||
334 | return NOTIFY_OK; | 334 | return NOTIFY_OK; |
335 | } | 335 | } |
336 | 336 | ||
337 | static struct notifier_block tboot_cpu_notifier __cpuinitdata = | 337 | static struct notifier_block tboot_cpu_notifier = |
338 | { | 338 | { |
339 | .notifier_call = tboot_cpu_callback, | 339 | .notifier_call = tboot_cpu_callback, |
340 | }; | 340 | }; |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 098b3cfda72e..6ff49247edf8 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -824,7 +824,7 @@ static void __init check_system_tsc_reliable(void) | |||
824 | * Make an educated guess if the TSC is trustworthy and synchronized | 824 | * Make an educated guess if the TSC is trustworthy and synchronized |
825 | * over all CPUs. | 825 | * over all CPUs. |
826 | */ | 826 | */ |
827 | __cpuinit int unsynchronized_tsc(void) | 827 | int unsynchronized_tsc(void) |
828 | { | 828 | { |
829 | if (!cpu_has_tsc || tsc_unstable) | 829 | if (!cpu_has_tsc || tsc_unstable) |
830 | return 1; | 830 | return 1; |
@@ -1020,7 +1020,7 @@ void __init tsc_init(void) | |||
1020 | * been calibrated. This assumes that CONSTANT_TSC applies to all | 1020 | * been calibrated. This assumes that CONSTANT_TSC applies to all |
1021 | * cpus in the socket - this should be a safe assumption. | 1021 | * cpus in the socket - this should be a safe assumption. |
1022 | */ | 1022 | */ |
1023 | unsigned long __cpuinit calibrate_delay_is_known(void) | 1023 | unsigned long calibrate_delay_is_known(void) |
1024 | { | 1024 | { |
1025 | int i, cpu = smp_processor_id(); | 1025 | int i, cpu = smp_processor_id(); |
1026 | 1026 | ||
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index fc25e60a5884..adfdf56a3714 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c | |||
@@ -25,24 +25,24 @@ | |||
25 | * Entry/exit counters that make sure that both CPUs | 25 | * Entry/exit counters that make sure that both CPUs |
26 | * run the measurement code at once: | 26 | * run the measurement code at once: |
27 | */ | 27 | */ |
28 | static __cpuinitdata atomic_t start_count; | 28 | static atomic_t start_count; |
29 | static __cpuinitdata atomic_t stop_count; | 29 | static atomic_t stop_count; |
30 | 30 | ||
31 | /* | 31 | /* |
32 | * We use a raw spinlock in this exceptional case, because | 32 | * We use a raw spinlock in this exceptional case, because |
33 | * we want to have the fastest, inlined, non-debug version | 33 | * we want to have the fastest, inlined, non-debug version |
34 | * of a critical section, to be able to prove TSC time-warps: | 34 | * of a critical section, to be able to prove TSC time-warps: |
35 | */ | 35 | */ |
36 | static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; | 36 | static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; |
37 | 37 | ||
38 | static __cpuinitdata cycles_t last_tsc; | 38 | static cycles_t last_tsc; |
39 | static __cpuinitdata cycles_t max_warp; | 39 | static cycles_t max_warp; |
40 | static __cpuinitdata int nr_warps; | 40 | static int nr_warps; |
41 | 41 | ||
42 | /* | 42 | /* |
43 | * TSC-warp measurement loop running on both CPUs: | 43 | * TSC-warp measurement loop running on both CPUs: |
44 | */ | 44 | */ |
45 | static __cpuinit void check_tsc_warp(unsigned int timeout) | 45 | static void check_tsc_warp(unsigned int timeout) |
46 | { | 46 | { |
47 | cycles_t start, now, prev, end; | 47 | cycles_t start, now, prev, end; |
48 | int i; | 48 | int i; |
@@ -121,7 +121,7 @@ static inline unsigned int loop_timeout(int cpu) | |||
121 | * Source CPU calls into this - it waits for the freshly booted | 121 | * Source CPU calls into this - it waits for the freshly booted |
122 | * target CPU to arrive and then starts the measurement: | 122 | * target CPU to arrive and then starts the measurement: |
123 | */ | 123 | */ |
124 | void __cpuinit check_tsc_sync_source(int cpu) | 124 | void check_tsc_sync_source(int cpu) |
125 | { | 125 | { |
126 | int cpus = 2; | 126 | int cpus = 2; |
127 | 127 | ||
@@ -187,7 +187,7 @@ void __cpuinit check_tsc_sync_source(int cpu) | |||
187 | /* | 187 | /* |
188 | * Freshly booted CPUs call into this: | 188 | * Freshly booted CPUs call into this: |
189 | */ | 189 | */ |
190 | void __cpuinit check_tsc_sync_target(void) | 190 | void check_tsc_sync_target(void) |
191 | { | 191 | { |
192 | int cpus = 2; | 192 | int cpus = 2; |
193 | 193 | ||
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index 9a907a67be8f..1f96f9347ed9 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -331,7 +331,7 @@ sigsegv: | |||
331 | * Assume __initcall executes before all user space. Hopefully kmod | 331 | * Assume __initcall executes before all user space. Hopefully kmod |
332 | * doesn't violate that. We'll find out if it does. | 332 | * doesn't violate that. We'll find out if it does. |
333 | */ | 333 | */ |
334 | static void __cpuinit vsyscall_set_cpu(int cpu) | 334 | static void vsyscall_set_cpu(int cpu) |
335 | { | 335 | { |
336 | unsigned long d; | 336 | unsigned long d; |
337 | unsigned long node = 0; | 337 | unsigned long node = 0; |
@@ -353,13 +353,13 @@ static void __cpuinit vsyscall_set_cpu(int cpu) | |||
353 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); | 353 | write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S); |
354 | } | 354 | } |
355 | 355 | ||
356 | static void __cpuinit cpu_vsyscall_init(void *arg) | 356 | static void cpu_vsyscall_init(void *arg) |
357 | { | 357 | { |
358 | /* preemption should be already off */ | 358 | /* preemption should be already off */ |
359 | vsyscall_set_cpu(raw_smp_processor_id()); | 359 | vsyscall_set_cpu(raw_smp_processor_id()); |
360 | } | 360 | } |
361 | 361 | ||
362 | static int __cpuinit | 362 | static int |
363 | cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) | 363 | cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg) |
364 | { | 364 | { |
365 | long cpu = (long)arg; | 365 | long cpu = (long)arg; |
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c index 45a14dbbddaf..5f24c71accaa 100644 --- a/arch/x86/kernel/x86_init.c +++ b/arch/x86/kernel/x86_init.c | |||
@@ -25,7 +25,7 @@ | |||
25 | #include <asm/iommu.h> | 25 | #include <asm/iommu.h> |
26 | #include <asm/mach_traps.h> | 26 | #include <asm/mach_traps.h> |
27 | 27 | ||
28 | void __cpuinit x86_init_noop(void) { } | 28 | void x86_init_noop(void) { } |
29 | void __init x86_init_uint_noop(unsigned int unused) { } | 29 | void __init x86_init_uint_noop(unsigned int unused) { } |
30 | int __init iommu_init_noop(void) { return 0; } | 30 | int __init iommu_init_noop(void) { return 0; } |
31 | void iommu_shutdown_noop(void) { } | 31 | void iommu_shutdown_noop(void) { } |
@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = { | |||
85 | }, | 85 | }, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = { | 88 | struct x86_cpuinit_ops x86_cpuinit = { |
89 | .early_percpu_clock_init = x86_init_noop, | 89 | .early_percpu_clock_init = x86_init_noop, |
90 | .setup_percpu_clockev = setup_secondary_APIC_clock, | 90 | .setup_percpu_clockev = setup_secondary_APIC_clock, |
91 | }; | 91 | }; |
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c index d6c28acdf99c..422fd8223470 100644 --- a/arch/x86/kernel/xsave.c +++ b/arch/x86/kernel/xsave.c | |||
@@ -573,7 +573,7 @@ static void __init xstate_enable_boot_cpu(void) | |||
573 | * This is somewhat obfuscated due to the lack of powerful enough | 573 | * This is somewhat obfuscated due to the lack of powerful enough |
574 | * overrides for the section checks. | 574 | * overrides for the section checks. |
575 | */ | 575 | */ |
576 | void __cpuinit xsave_init(void) | 576 | void xsave_init(void) |
577 | { | 577 | { |
578 | static __refdata void (*next_func)(void) = xstate_enable_boot_cpu; | 578 | static __refdata void (*next_func)(void) = xstate_enable_boot_cpu; |
579 | void (*this_func)(void); | 579 | void (*this_func)(void); |
@@ -594,7 +594,7 @@ static inline void __init eager_fpu_init_bp(void) | |||
594 | setup_init_fpu_buf(); | 594 | setup_init_fpu_buf(); |
595 | } | 595 | } |
596 | 596 | ||
597 | void __cpuinit eager_fpu_init(void) | 597 | void eager_fpu_init(void) |
598 | { | 598 | { |
599 | static __refdata void (*boot_func)(void) = eager_fpu_init_bp; | 599 | static __refdata void (*boot_func)(void) = eager_fpu_init_bp; |
600 | 600 | ||
diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c index dc0b727742f4..0057a7accfb1 100644 --- a/arch/x86/mm/mmio-mod.c +++ b/arch/x86/mm/mmio-mod.c | |||
@@ -410,9 +410,7 @@ out: | |||
410 | pr_warning("multiple CPUs still online, may miss events.\n"); | 410 | pr_warning("multiple CPUs still online, may miss events.\n"); |
411 | } | 411 | } |
412 | 412 | ||
413 | /* __ref because leave_uniprocessor calls cpu_up which is __cpuinit, | 413 | static void leave_uniprocessor(void) |
414 | but this whole function is ifdefed CONFIG_HOTPLUG_CPU */ | ||
415 | static void __ref leave_uniprocessor(void) | ||
416 | { | 414 | { |
417 | int cpu; | 415 | int cpu; |
418 | int err; | 416 | int err; |
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index a71c4e207679..8bf93bae1f13 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c | |||
@@ -60,7 +60,7 @@ s16 __apicid_to_node[MAX_LOCAL_APIC] = { | |||
60 | [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE | 60 | [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE |
61 | }; | 61 | }; |
62 | 62 | ||
63 | int __cpuinit numa_cpu_node(int cpu) | 63 | int numa_cpu_node(int cpu) |
64 | { | 64 | { |
65 | int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); | 65 | int apicid = early_per_cpu(x86_cpu_to_apicid, cpu); |
66 | 66 | ||
@@ -691,12 +691,12 @@ void __init init_cpu_to_node(void) | |||
691 | #ifndef CONFIG_DEBUG_PER_CPU_MAPS | 691 | #ifndef CONFIG_DEBUG_PER_CPU_MAPS |
692 | 692 | ||
693 | # ifndef CONFIG_NUMA_EMU | 693 | # ifndef CONFIG_NUMA_EMU |
694 | void __cpuinit numa_add_cpu(int cpu) | 694 | void numa_add_cpu(int cpu) |
695 | { | 695 | { |
696 | cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | 696 | cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); |
697 | } | 697 | } |
698 | 698 | ||
699 | void __cpuinit numa_remove_cpu(int cpu) | 699 | void numa_remove_cpu(int cpu) |
700 | { | 700 | { |
701 | cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); | 701 | cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]); |
702 | } | 702 | } |
@@ -763,17 +763,17 @@ void debug_cpumask_set_cpu(int cpu, int node, bool enable) | |||
763 | } | 763 | } |
764 | 764 | ||
765 | # ifndef CONFIG_NUMA_EMU | 765 | # ifndef CONFIG_NUMA_EMU |
766 | static void __cpuinit numa_set_cpumask(int cpu, bool enable) | 766 | static void numa_set_cpumask(int cpu, bool enable) |
767 | { | 767 | { |
768 | debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); | 768 | debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable); |
769 | } | 769 | } |
770 | 770 | ||
771 | void __cpuinit numa_add_cpu(int cpu) | 771 | void numa_add_cpu(int cpu) |
772 | { | 772 | { |
773 | numa_set_cpumask(cpu, true); | 773 | numa_set_cpumask(cpu, true); |
774 | } | 774 | } |
775 | 775 | ||
776 | void __cpuinit numa_remove_cpu(int cpu) | 776 | void numa_remove_cpu(int cpu) |
777 | { | 777 | { |
778 | numa_set_cpumask(cpu, false); | 778 | numa_set_cpumask(cpu, false); |
779 | } | 779 | } |
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c index dbbbb47260cc..a8f90ce3dedf 100644 --- a/arch/x86/mm/numa_emulation.c +++ b/arch/x86/mm/numa_emulation.c | |||
@@ -10,7 +10,7 @@ | |||
10 | 10 | ||
11 | #include "numa_internal.h" | 11 | #include "numa_internal.h" |
12 | 12 | ||
13 | static int emu_nid_to_phys[MAX_NUMNODES] __cpuinitdata; | 13 | static int emu_nid_to_phys[MAX_NUMNODES]; |
14 | static char *emu_cmdline __initdata; | 14 | static char *emu_cmdline __initdata; |
15 | 15 | ||
16 | void __init numa_emu_cmdline(char *str) | 16 | void __init numa_emu_cmdline(char *str) |
@@ -444,7 +444,7 @@ no_emu: | |||
444 | } | 444 | } |
445 | 445 | ||
446 | #ifndef CONFIG_DEBUG_PER_CPU_MAPS | 446 | #ifndef CONFIG_DEBUG_PER_CPU_MAPS |
447 | void __cpuinit numa_add_cpu(int cpu) | 447 | void numa_add_cpu(int cpu) |
448 | { | 448 | { |
449 | int physnid, nid; | 449 | int physnid, nid; |
450 | 450 | ||
@@ -462,7 +462,7 @@ void __cpuinit numa_add_cpu(int cpu) | |||
462 | cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); | 462 | cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); |
463 | } | 463 | } |
464 | 464 | ||
465 | void __cpuinit numa_remove_cpu(int cpu) | 465 | void numa_remove_cpu(int cpu) |
466 | { | 466 | { |
467 | int i; | 467 | int i; |
468 | 468 | ||
@@ -470,7 +470,7 @@ void __cpuinit numa_remove_cpu(int cpu) | |||
470 | cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); | 470 | cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); |
471 | } | 471 | } |
472 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ | 472 | #else /* !CONFIG_DEBUG_PER_CPU_MAPS */ |
473 | static void __cpuinit numa_set_cpumask(int cpu, bool enable) | 473 | static void numa_set_cpumask(int cpu, bool enable) |
474 | { | 474 | { |
475 | int nid, physnid; | 475 | int nid, physnid; |
476 | 476 | ||
@@ -490,12 +490,12 @@ static void __cpuinit numa_set_cpumask(int cpu, bool enable) | |||
490 | } | 490 | } |
491 | } | 491 | } |
492 | 492 | ||
493 | void __cpuinit numa_add_cpu(int cpu) | 493 | void numa_add_cpu(int cpu) |
494 | { | 494 | { |
495 | numa_set_cpumask(cpu, true); | 495 | numa_set_cpumask(cpu, true); |
496 | } | 496 | } |
497 | 497 | ||
498 | void __cpuinit numa_remove_cpu(int cpu) | 498 | void numa_remove_cpu(int cpu) |
499 | { | 499 | { |
500 | numa_set_cpumask(cpu, false); | 500 | numa_set_cpumask(cpu, false); |
501 | } | 501 | } |
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c index 410531d3c292..90555bf60aa4 100644 --- a/arch/x86/mm/setup_nx.c +++ b/arch/x86/mm/setup_nx.c | |||
@@ -5,7 +5,7 @@ | |||
5 | #include <asm/pgtable.h> | 5 | #include <asm/pgtable.h> |
6 | #include <asm/proto.h> | 6 | #include <asm/proto.h> |
7 | 7 | ||
8 | static int disable_nx __cpuinitdata; | 8 | static int disable_nx; |
9 | 9 | ||
10 | /* | 10 | /* |
11 | * noexec = on|off | 11 | * noexec = on|off |
@@ -29,7 +29,7 @@ static int __init noexec_setup(char *str) | |||
29 | } | 29 | } |
30 | early_param("noexec", noexec_setup); | 30 | early_param("noexec", noexec_setup); |
31 | 31 | ||
32 | void __cpuinit x86_configure_nx(void) | 32 | void x86_configure_nx(void) |
33 | { | 33 | { |
34 | if (cpu_has_nx && !disable_nx) | 34 | if (cpu_has_nx && !disable_nx) |
35 | __supported_pte_mask |= _PAGE_NX; | 35 | __supported_pte_mask |= _PAGE_NX; |
diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c index e9e6ed5cdf94..a48be98e9ded 100644 --- a/arch/x86/pci/amd_bus.c +++ b/arch/x86/pci/amd_bus.c | |||
@@ -312,7 +312,7 @@ static int __init early_fill_mp_bus_info(void) | |||
312 | 312 | ||
313 | #define ENABLE_CF8_EXT_CFG (1ULL << 46) | 313 | #define ENABLE_CF8_EXT_CFG (1ULL << 46) |
314 | 314 | ||
315 | static void __cpuinit enable_pci_io_ecs(void *unused) | 315 | static void enable_pci_io_ecs(void *unused) |
316 | { | 316 | { |
317 | u64 reg; | 317 | u64 reg; |
318 | rdmsrl(MSR_AMD64_NB_CFG, reg); | 318 | rdmsrl(MSR_AMD64_NB_CFG, reg); |
@@ -322,8 +322,8 @@ static void __cpuinit enable_pci_io_ecs(void *unused) | |||
322 | } | 322 | } |
323 | } | 323 | } |
324 | 324 | ||
325 | static int __cpuinit amd_cpu_notify(struct notifier_block *self, | 325 | static int amd_cpu_notify(struct notifier_block *self, unsigned long action, |
326 | unsigned long action, void *hcpu) | 326 | void *hcpu) |
327 | { | 327 | { |
328 | int cpu = (long)hcpu; | 328 | int cpu = (long)hcpu; |
329 | switch (action) { | 329 | switch (action) { |
@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self, | |||
337 | return NOTIFY_OK; | 337 | return NOTIFY_OK; |
338 | } | 338 | } |
339 | 339 | ||
340 | static struct notifier_block __cpuinitdata amd_cpu_notifier = { | 340 | static struct notifier_block amd_cpu_notifier = { |
341 | .notifier_call = amd_cpu_notify, | 341 | .notifier_call = amd_cpu_notify, |
342 | }; | 342 | }; |
343 | 343 | ||
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c index f8ab4945892e..baec704231b3 100644 --- a/arch/x86/platform/ce4100/ce4100.c +++ b/arch/x86/platform/ce4100/ce4100.c | |||
@@ -134,7 +134,7 @@ static void __init sdv_arch_setup(void) | |||
134 | } | 134 | } |
135 | 135 | ||
136 | #ifdef CONFIG_X86_IO_APIC | 136 | #ifdef CONFIG_X86_IO_APIC |
137 | static void __cpuinit sdv_pci_init(void) | 137 | static void sdv_pci_init(void) |
138 | { | 138 | { |
139 | x86_of_pci_init(); | 139 | x86_of_pci_init(); |
140 | /* We can't set this earlier, because we need to calibrate the timer */ | 140 | /* We can't set this earlier, because we need to calibrate the timer */ |
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c index a0a0a4389bbd..47fe66fe61f1 100644 --- a/arch/x86/platform/mrst/mrst.c +++ b/arch/x86/platform/mrst/mrst.c | |||
@@ -65,7 +65,7 @@ | |||
65 | * lapic (always-on,ARAT) ------ 150 | 65 | * lapic (always-on,ARAT) ------ 150 |
66 | */ | 66 | */ |
67 | 67 | ||
68 | __cpuinitdata enum mrst_timer_options mrst_timer_options; | 68 | enum mrst_timer_options mrst_timer_options; |
69 | 69 | ||
70 | static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM]; | 70 | static u32 sfi_mtimer_usage[SFI_MTMR_MAX_NUM]; |
71 | static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM]; | 71 | static struct sfi_timer_table_entry sfi_mtimer_array[SFI_MTMR_MAX_NUM]; |
@@ -248,7 +248,7 @@ static void __init mrst_time_init(void) | |||
248 | apbt_time_init(); | 248 | apbt_time_init(); |
249 | } | 249 | } |
250 | 250 | ||
251 | static void __cpuinit mrst_arch_setup(void) | 251 | static void mrst_arch_setup(void) |
252 | { | 252 | { |
253 | if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) | 253 | if (boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 0x27) |
254 | __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL; | 254 | __mrst_cpu_chip = MRST_CPU_CHIP_PENWELL; |
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 2fa02bc50034..193097ef3d7d 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c | |||
@@ -1681,8 +1681,8 @@ static void __init init_hvm_pv_info(void) | |||
1681 | xen_domain_type = XEN_HVM_DOMAIN; | 1681 | xen_domain_type = XEN_HVM_DOMAIN; |
1682 | } | 1682 | } |
1683 | 1683 | ||
1684 | static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, | 1684 | static int xen_hvm_cpu_notify(struct notifier_block *self, unsigned long action, |
1685 | unsigned long action, void *hcpu) | 1685 | void *hcpu) |
1686 | { | 1686 | { |
1687 | int cpu = (long)hcpu; | 1687 | int cpu = (long)hcpu; |
1688 | switch (action) { | 1688 | switch (action) { |
@@ -1700,7 +1700,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, | |||
1700 | return NOTIFY_OK; | 1700 | return NOTIFY_OK; |
1701 | } | 1701 | } |
1702 | 1702 | ||
1703 | static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = { | 1703 | static struct notifier_block xen_hvm_cpu_notifier = { |
1704 | .notifier_call = xen_hvm_cpu_notify, | 1704 | .notifier_call = xen_hvm_cpu_notify, |
1705 | }; | 1705 | }; |
1706 | 1706 | ||
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index 94eac5c85cdc..056d11faef21 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c | |||
@@ -475,7 +475,7 @@ static void __init fiddle_vdso(void) | |||
475 | #endif | 475 | #endif |
476 | } | 476 | } |
477 | 477 | ||
478 | static int __cpuinit register_callback(unsigned type, const void *func) | 478 | static int register_callback(unsigned type, const void *func) |
479 | { | 479 | { |
480 | struct callback_register callback = { | 480 | struct callback_register callback = { |
481 | .type = type, | 481 | .type = type, |
@@ -486,7 +486,7 @@ static int __cpuinit register_callback(unsigned type, const void *func) | |||
486 | return HYPERVISOR_callback_op(CALLBACKOP_register, &callback); | 486 | return HYPERVISOR_callback_op(CALLBACKOP_register, &callback); |
487 | } | 487 | } |
488 | 488 | ||
489 | void __cpuinit xen_enable_sysenter(void) | 489 | void xen_enable_sysenter(void) |
490 | { | 490 | { |
491 | int ret; | 491 | int ret; |
492 | unsigned sysenter_feature; | 492 | unsigned sysenter_feature; |
@@ -505,7 +505,7 @@ void __cpuinit xen_enable_sysenter(void) | |||
505 | setup_clear_cpu_cap(sysenter_feature); | 505 | setup_clear_cpu_cap(sysenter_feature); |
506 | } | 506 | } |
507 | 507 | ||
508 | void __cpuinit xen_enable_syscall(void) | 508 | void xen_enable_syscall(void) |
509 | { | 509 | { |
510 | #ifdef CONFIG_X86_64 | 510 | #ifdef CONFIG_X86_64 |
511 | int ret; | 511 | int ret; |
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c index c1367b29c3b1..ca92754eb846 100644 --- a/arch/x86/xen/smp.c +++ b/arch/x86/xen/smp.c | |||
@@ -65,7 +65,7 @@ static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) | |||
65 | return IRQ_HANDLED; | 65 | return IRQ_HANDLED; |
66 | } | 66 | } |
67 | 67 | ||
68 | static void __cpuinit cpu_bringup(void) | 68 | static void cpu_bringup(void) |
69 | { | 69 | { |
70 | int cpu; | 70 | int cpu; |
71 | 71 | ||
@@ -97,7 +97,7 @@ static void __cpuinit cpu_bringup(void) | |||
97 | wmb(); /* make sure everything is out */ | 97 | wmb(); /* make sure everything is out */ |
98 | } | 98 | } |
99 | 99 | ||
100 | static void __cpuinit cpu_bringup_and_idle(void) | 100 | static void cpu_bringup_and_idle(void) |
101 | { | 101 | { |
102 | cpu_bringup(); | 102 | cpu_bringup(); |
103 | cpu_startup_entry(CPUHP_ONLINE); | 103 | cpu_startup_entry(CPUHP_ONLINE); |
@@ -326,7 +326,7 @@ static void __init xen_smp_prepare_cpus(unsigned int max_cpus) | |||
326 | set_cpu_present(cpu, true); | 326 | set_cpu_present(cpu, true); |
327 | } | 327 | } |
328 | 328 | ||
329 | static int __cpuinit | 329 | static int |
330 | cpu_initialize_context(unsigned int cpu, struct task_struct *idle) | 330 | cpu_initialize_context(unsigned int cpu, struct task_struct *idle) |
331 | { | 331 | { |
332 | struct vcpu_guest_context *ctxt; | 332 | struct vcpu_guest_context *ctxt; |
@@ -397,7 +397,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle) | |||
397 | return 0; | 397 | return 0; |
398 | } | 398 | } |
399 | 399 | ||
400 | static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle) | 400 | static int xen_cpu_up(unsigned int cpu, struct task_struct *idle) |
401 | { | 401 | { |
402 | int rc; | 402 | int rc; |
403 | 403 | ||
@@ -470,7 +470,7 @@ static void xen_cpu_die(unsigned int cpu) | |||
470 | xen_teardown_timer(cpu); | 470 | xen_teardown_timer(cpu); |
471 | } | 471 | } |
472 | 472 | ||
473 | static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */ | 473 | static void xen_play_dead(void) /* used only with HOTPLUG_CPU */ |
474 | { | 474 | { |
475 | play_dead_common(); | 475 | play_dead_common(); |
476 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); | 476 | HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL); |
@@ -691,7 +691,7 @@ static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) | |||
691 | xen_init_lock_cpu(0); | 691 | xen_init_lock_cpu(0); |
692 | } | 692 | } |
693 | 693 | ||
694 | static int __cpuinit xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) | 694 | static int xen_hvm_cpu_up(unsigned int cpu, struct task_struct *tidle) |
695 | { | 695 | { |
696 | int rc; | 696 | int rc; |
697 | rc = native_cpu_up(cpu, tidle); | 697 | rc = native_cpu_up(cpu, tidle); |
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index a40f8508e760..cf3caee356b3 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c | |||
@@ -361,7 +361,7 @@ static irqreturn_t dummy_handler(int irq, void *dev_id) | |||
361 | return IRQ_HANDLED; | 361 | return IRQ_HANDLED; |
362 | } | 362 | } |
363 | 363 | ||
364 | void __cpuinit xen_init_lock_cpu(int cpu) | 364 | void xen_init_lock_cpu(int cpu) |
365 | { | 365 | { |
366 | int irq; | 366 | int irq; |
367 | char *name; | 367 | char *name; |
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index a95b41744ad0..86782c5d7e2a 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h | |||
@@ -73,7 +73,7 @@ static inline void xen_hvm_smp_init(void) {} | |||
73 | 73 | ||
74 | #ifdef CONFIG_PARAVIRT_SPINLOCKS | 74 | #ifdef CONFIG_PARAVIRT_SPINLOCKS |
75 | void __init xen_init_spinlocks(void); | 75 | void __init xen_init_spinlocks(void); |
76 | void __cpuinit xen_init_lock_cpu(int cpu); | 76 | void xen_init_lock_cpu(int cpu); |
77 | void xen_uninit_lock_cpu(int cpu); | 77 | void xen_uninit_lock_cpu(int cpu); |
78 | #else | 78 | #else |
79 | static inline void xen_init_spinlocks(void) | 79 | static inline void xen_init_spinlocks(void) |