author	Paul Gortmaker <paul.gortmaker@windriver.com>	2013-06-18 18:23:59 -0400
committer	Paul Gortmaker <paul.gortmaker@windriver.com>	2013-07-14 19:36:56 -0400
commit	148f9bb87745ed45f7a11b2cbd3bc0f017d5d257 (patch)
tree	88a21d992eae94a05cc30ddbc2c71465701ec3aa /arch/x86/kernel
parent	70e2a7bf23a0c412b908ba260e790a4f51c9f2b0 (diff)
x86: delete __cpuinit usage from all x86 files
The __cpuinit type of throwaway sections might have made sense some time ago
when RAM was more constrained, but now the savings do not offset the cost and
complications. For example, the fix in commit 5e427ec2d0 ("x86: Fix bit
corruption at CPU resume time") is a good example of the nasty type of bugs
that can be created with improper use of the various __init prefixes.

After a discussion on LKML[1] it was decided that cpuinit should go the way
of devinit and be phased out. Once all the users are gone, we can then
finally remove the macros themselves from linux/init.h.

Note that some harmless section mismatch warnings may result, since
notify_cpu_starting() and cpu_up() are arch independent (kernel/cpu.c) and
are flagged as __cpuinit -- so if we remove the __cpuinit from arch specific
callers, we will also get section mismatch warnings. As an intermediate step,
we intend to turn the linux/init.h cpuinit content into no-ops as early as
possible, since that will get rid of these warnings. In any case, they are
temporary and harmless.

This removes all the arch/x86 uses of the __cpuinit macros from all C files.
x86 only had the one __CPUINIT used in assembly files, and it wasn't paired
off with a .previous or a __FINIT, so we can delete it directly w/o any
corresponding additional change there.

[1] https://lkml.org/lkml/2013/5/20/589

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: x86@kernel.org
Acked-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
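For readers unfamiliar with the annotations being deleted: __cpuinit and its
data/const variants tagged code and data needed only while bringing a CPU up,
so kernels built without CPU hotplug could discard it after boot. A rough
sketch of the linux/init.h definitions involved is below; it is an
approximation from memory of the 3.x-era header, not a quote from the patch,
so treat the exact section names and attribute spellings as assumptions.

```c
/*
 * Approximate sketch of the include/linux/init.h definitions this series
 * phases out (not a verbatim quote; exact bodies varied by release).
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPUs may be brought up after boot: the code must stay resident,
 * so the annotations expand to nothing.
 */
#define __cpuinit
#define __cpuinitdata
#define __cpuinitconst
#define __CPUINIT			/* assembly-side marker */
#else
/* No hotplug: route text/data into sections discarded after boot. */
#define __cpuinit	__section(.cpuinit.text) __cold notrace
#define __cpuinitdata	__section(.cpuinit.data)
#define __cpuinitconst	__section(.cpuinit.rodata)
#define __CPUINIT	.section ".cpuinit.text", "ax"
#endif
```

The "intermediate step" the message refers to is making the empty branch
unconditional: once every macro expands to nothing, any remaining users in
other arches compile to plain resident code and the cross-arch section
mismatch warnings go away.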
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 6
-rw-r--r--  arch/x86/kernel/apic/apic.c | 30
-rw-r--r--  arch/x86/kernel/apic/apic_numachip.c | 2
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c | 2
-rw-r--r--  arch/x86/kernel/apic/numaq_32.c | 2
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 2
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 14
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 33
-rw-r--r--  arch/x86/kernel/cpu/centaur.c | 26
-rw-r--r--  arch/x86/kernel/cpu/common.c | 64
-rw-r--r--  arch/x86/kernel/cpu/cyrix.c | 40
-rw-r--r--  arch/x86/kernel/cpu/hypervisor.c | 2
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 30
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 55
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 23
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 14
-rw-r--r--  arch/x86/kernel/cpu/mcheck/therm_throt.c | 9
-rw-r--r--  arch/x86/kernel/cpu/perf_event.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_ibs.c | 2
-rw-r--r--  arch/x86/kernel/cpu/perf_event_amd_uncore.c | 31
-rw-r--r--  arch/x86/kernel/cpu/perf_event_intel_uncore.c | 20
-rw-r--r--  arch/x86/kernel/cpu/rdrand.c | 2
-rw-r--r--  arch/x86/kernel/cpu/scattered.c | 4
-rw-r--r--  arch/x86/kernel/cpu/topology.c | 2
-rw-r--r--  arch/x86/kernel/cpu/transmeta.c | 6
-rw-r--r--  arch/x86/kernel/cpu/umc.c | 2
-rw-r--r--  arch/x86/kernel/cpu/vmware.c | 2
-rw-r--r--  arch/x86/kernel/cpuid.c | 7
-rw-r--r--  arch/x86/kernel/devicetree.c | 2
-rw-r--r--  arch/x86/kernel/head_32.S | 1
-rw-r--r--  arch/x86/kernel/i387.c | 10
-rw-r--r--  arch/x86/kernel/irq_32.c | 2
-rw-r--r--  arch/x86/kernel/kvm.c | 10
-rw-r--r--  arch/x86/kernel/kvmclock.c | 2
-rw-r--r--  arch/x86/kernel/microcode_amd_early.c | 8
-rw-r--r--  arch/x86/kernel/microcode_core.c | 2
-rw-r--r--  arch/x86/kernel/microcode_core_early.c | 6
-rw-r--r--  arch/x86/kernel/microcode_intel_early.c | 26
-rw-r--r--  arch/x86/kernel/mmconf-fam10h_64.c | 12
-rw-r--r--  arch/x86/kernel/msr.c | 6
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kernel/setup.c | 2
-rw-r--r--  arch/x86/kernel/smpboot.c | 28
-rw-r--r--  arch/x86/kernel/tboot.c | 6
-rw-r--r--  arch/x86/kernel/tsc.c | 4
-rw-r--r--  arch/x86/kernel/tsc_sync.c | 18
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 6
-rw-r--r--  arch/x86/kernel/x86_init.c | 4
-rw-r--r--  arch/x86/kernel/xsave.c | 4
49 files changed, 293 insertions, 302 deletions
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index d81a972dd506..2627a81253ee 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -195,7 +195,7 @@ static int __init acpi_parse_madt(struct acpi_table_header *table)
 	return 0;
 }
 
-static void __cpuinit acpi_register_lapic(int id, u8 enabled)
+static void acpi_register_lapic(int id, u8 enabled)
 {
 	unsigned int ver = 0;
 
@@ -607,7 +607,7 @@ void __init acpi_set_irq_model_ioapic(void)
 #ifdef CONFIG_ACPI_HOTPLUG_CPU
 #include <acpi/processor.h>
 
-static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 {
 #ifdef CONFIG_ACPI_NUMA
 	int nid;
@@ -620,7 +620,7 @@ static void __cpuinit acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 #endif
 }
 
-static int __cpuinit _acpi_map_lsapic(acpi_handle handle, int *pcpu)
+static int _acpi_map_lsapic(acpi_handle handle, int *pcpu)
 {
 	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
 	union acpi_object *obj;
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 99663b59123a..eca89c53a7f5 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -58,7 +58,7 @@
 
 unsigned int num_processors;
 
-unsigned disabled_cpus __cpuinitdata;
+unsigned disabled_cpus;
 
 /* Processor that is doing the boot up */
 unsigned int boot_cpu_physical_apicid = -1U;
@@ -544,7 +544,7 @@ static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
  * Setup the local APIC timer for this CPU. Copy the initialized values
  * of the boot CPU and register the clock event in the framework.
  */
-static void __cpuinit setup_APIC_timer(void)
+static void setup_APIC_timer(void)
 {
 	struct clock_event_device *levt = &__get_cpu_var(lapic_events);
 
@@ -866,7 +866,7 @@ void __init setup_boot_APIC_clock(void)
 	setup_APIC_timer();
 }
 
-void __cpuinit setup_secondary_APIC_clock(void)
+void setup_secondary_APIC_clock(void)
 {
 	setup_APIC_timer();
 }
@@ -1229,7 +1229,7 @@ void __init init_bsp_APIC(void)
 	apic_write(APIC_LVT1, value);
 }
 
-static void __cpuinit lapic_setup_esr(void)
+static void lapic_setup_esr(void)
 {
 	unsigned int oldvalue, value, maxlvt;
 
@@ -1276,7 +1276,7 @@ static void __cpuinit lapic_setup_esr(void)
  * Used to setup local APIC while initializing BSP or bringin up APs.
  * Always called with preemption disabled.
  */
-void __cpuinit setup_local_APIC(void)
+void setup_local_APIC(void)
 {
 	int cpu = smp_processor_id();
 	unsigned int value, queued;
@@ -1471,7 +1471,7 @@ void __cpuinit setup_local_APIC(void)
 #endif
 }
 
-void __cpuinit end_local_APIC_setup(void)
+void end_local_APIC_setup(void)
 {
 	lapic_setup_esr();
 
@@ -2107,7 +2107,7 @@ void disconnect_bsp_APIC(int virt_wire_setup)
 	apic_write(APIC_LVT1, value);
 }
 
-void __cpuinit generic_processor_info(int apicid, int version)
+void generic_processor_info(int apicid, int version)
 {
 	int cpu, max = nr_cpu_ids;
 	bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid,
@@ -2377,7 +2377,7 @@ static struct syscore_ops lapic_syscore_ops = {
 	.suspend = lapic_suspend,
 };
 
-static void __cpuinit apic_pm_activate(void)
+static void apic_pm_activate(void)
 {
 	apic_pm_state.active = 1;
 }
@@ -2402,7 +2402,7 @@ static void apic_pm_activate(void) { }
 
 #ifdef CONFIG_X86_64
 
-static int __cpuinit apic_cluster_num(void)
+static int apic_cluster_num(void)
 {
 	int i, clusters, zeros;
 	unsigned id;
@@ -2447,10 +2447,10 @@ static int __cpuinit apic_cluster_num(void)
 	return clusters;
 }
 
-static int __cpuinitdata multi_checked;
-static int __cpuinitdata multi;
+static int multi_checked;
+static int multi;
 
-static int __cpuinit set_multi(const struct dmi_system_id *d)
+static int set_multi(const struct dmi_system_id *d)
 {
 	if (multi)
 		return 0;
@@ -2459,7 +2459,7 @@ static int __cpuinit set_multi(const struct dmi_system_id *d)
 	return 0;
 }
 
-static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
+static const struct dmi_system_id multi_dmi_table[] = {
 	{
 		.callback = set_multi,
 		.ident = "IBM System Summit2",
@@ -2471,7 +2471,7 @@ static const __cpuinitconst struct dmi_system_id multi_dmi_table[] = {
 	{}
 };
 
-static void __cpuinit dmi_check_multi(void)
+static void dmi_check_multi(void)
 {
 	if (multi_checked)
 		return;
@@ -2488,7 +2488,7 @@ static void __cpuinit dmi_check_multi(void)
  * multi-chassis.
  * Use DMI to check them
  */
-__cpuinit int apic_is_clustered_box(void)
+int apic_is_clustered_box(void)
 {
 	dmi_check_multi();
 	if (multi)
diff --git a/arch/x86/kernel/apic/apic_numachip.c b/arch/x86/kernel/apic/apic_numachip.c
index 9a9110918ca7..3e67f9e3d7ef 100644
--- a/arch/x86/kernel/apic/apic_numachip.c
+++ b/arch/x86/kernel/apic/apic_numachip.c
@@ -74,7 +74,7 @@ static int numachip_phys_pkg_id(int initial_apic_id, int index_msb)
 	return initial_apic_id >> index_msb;
 }
 
-static int __cpuinit numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int numachip_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 	union numachip_csr_g3_ext_irq_gen int_gen;
 
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 0874799a98c6..c55224731b2d 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -130,7 +130,7 @@ int es7000_plat;
  */
 
 
-static int __cpuinit wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
+static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
 {
 	unsigned long vect = 0, psaival = 0;
 
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index d661ee95cabf..1e42e8f305ee 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -105,7 +105,7 @@ static void __init smp_dump_qct(void)
 	}
 }
 
-void __cpuinit numaq_tsc_disable(void)
+void numaq_tsc_disable(void)
 {
 	if (!found_numaq)
 		return;
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index c88baa4ff0e5..140e29db478d 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -148,7 +148,7 @@ static void init_x2apic_ldr(void)
 /*
  * At CPU state changes, update the x2apic cluster sibling info.
  */
-static int __cpuinit
+static int
 update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int this_cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 63092afb142e..1191ac1c9d25 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -209,7 +209,7 @@ EXPORT_SYMBOL_GPL(uv_possible_blades);
 unsigned long sn_rtc_cycles_per_second;
 EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 
-static int __cpuinit uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
+static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
 {
 #ifdef CONFIG_SMP
 	unsigned long val;
@@ -416,7 +416,7 @@ static struct apic __refdata apic_x2apic_uv_x = {
 	.safe_wait_icr_idle = native_safe_x2apic_wait_icr_idle,
 };
 
-static __cpuinit void set_x2apic_extra_bits(int pnode)
+static void set_x2apic_extra_bits(int pnode)
 {
 	__this_cpu_write(x2apic_extra_bits, pnode << uvh_apicid.s.pnode_shift);
 }
@@ -735,7 +735,7 @@ static void uv_heartbeat(unsigned long ignored)
 	mod_timer_pinned(timer, jiffies + SCIR_CPU_HB_INTERVAL);
 }
 
-static void __cpuinit uv_heartbeat_enable(int cpu)
+static void uv_heartbeat_enable(int cpu)
 {
 	while (!uv_cpu_hub_info(cpu)->scir.enabled) {
 		struct timer_list *timer = &uv_cpu_hub_info(cpu)->scir.timer;
@@ -752,7 +752,7 @@ static void __cpuinit uv_heartbeat_enable(int cpu)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void __cpuinit uv_heartbeat_disable(int cpu)
+static void uv_heartbeat_disable(int cpu)
 {
 	if (uv_cpu_hub_info(cpu)->scir.enabled) {
 		uv_cpu_hub_info(cpu)->scir.enabled = 0;
@@ -764,8 +764,8 @@ static void __cpuinit uv_heartbeat_disable(int cpu)
 /*
  * cpu hotplug notifier
  */
-static __cpuinit int uv_scir_cpu_notify(struct notifier_block *self,
-					unsigned long action, void *hcpu)
+static int uv_scir_cpu_notify(struct notifier_block *self, unsigned long action,
+			      void *hcpu)
 {
 	long cpu = (long)hcpu;
 
@@ -835,7 +835,7 @@ int uv_set_vga_state(struct pci_dev *pdev, bool decode,
  * Called on each cpu to initialize the per_cpu UV data area.
  * FIXME: hotplug not supported yet
  */
-void __cpuinit uv_cpu_init(void)
+void uv_cpu_init(void)
 {
 	/* CPU 0 initilization will be done via uv_system_init. */
 	if (!uv_blade_info)
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index c587a8757227..f654ecefea5b 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -69,7 +69,7 @@ static inline int wrmsrl_amd_safe(unsigned msr, unsigned long long val)
 extern void vide(void);
 __asm__(".align 4\nvide: ret");
 
-static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
+static void init_amd_k5(struct cpuinfo_x86 *c)
 {
 /*
  * General Systems BIOSen alias the cpu frequency registers
@@ -87,7 +87,7 @@ static void __cpuinit init_amd_k5(struct cpuinfo_x86 *c)
 }
 
 
-static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
+static void init_amd_k6(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int mbytes = get_num_physpages() >> (20-PAGE_SHIFT);
@@ -179,7 +179,7 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
+static void amd_k7_smp_check(struct cpuinfo_x86 *c)
 {
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
@@ -222,7 +222,7 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
 	add_taint(TAINT_UNSAFE_SMP, LOCKDEP_NOW_UNRELIABLE);
 }
 
-static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
+static void init_amd_k7(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 
@@ -267,7 +267,7 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
  * To workaround broken NUMA config. Read the comment in
  * srat_detect_node().
  */
-static int __cpuinit nearby_node(int apicid)
+static int nearby_node(int apicid)
 {
 	int i, node;
 
@@ -292,7 +292,7 @@ static int __cpuinit nearby_node(int apicid)
  * (2) AMD processors supporting compute units
 */
 #ifdef CONFIG_X86_HT
-static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
+static void amd_get_topology(struct cpuinfo_x86 *c)
 {
 	u32 nodes, cores_per_cu = 1;
 	u8 node_id;
@@ -342,7 +342,7 @@ static void __cpuinit amd_get_topology(struct cpuinfo_x86 *c)
 * On a AMD dual core setup the lower bits of the APIC id distingush the cores.
 * Assumes number of cores is a power of two.
 */
-static void __cpuinit amd_detect_cmp(struct cpuinfo_x86 *c)
+static void amd_detect_cmp(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	unsigned bits;
@@ -369,7 +369,7 @@ u16 amd_get_nb_id(int cpu)
 }
 EXPORT_SYMBOL_GPL(amd_get_nb_id);
 
-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+static void srat_detect_node(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_NUMA
 	int cpu = smp_processor_id();
@@ -421,7 +421,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
+static void early_init_amd_mc(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	unsigned bits, ecx;
@@ -447,7 +447,7 @@ static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
+static void bsp_init_amd(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
 
@@ -475,7 +475,7 @@ static void __cpuinit bsp_init_amd(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
+static void early_init_amd(struct cpuinfo_x86 *c)
 {
 	early_init_amd_mc(c);
 
@@ -514,7 +514,7 @@ static const int amd_erratum_383[];
 static const int amd_erratum_400[];
 static bool cpu_has_amd_erratum(const int *erratum);
 
-static void __cpuinit init_amd(struct cpuinfo_x86 *c)
+static void init_amd(struct cpuinfo_x86 *c)
 {
 	u32 dummy;
 	unsigned long long value;
@@ -740,8 +740,7 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
 }
 
 #ifdef CONFIG_X86_32
-static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
-					     unsigned int size)
+static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/* AMD errata T13 (order #21922) */
 	if ((c->x86 == 6)) {
@@ -757,7 +756,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
 }
 #endif
 
-static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
+static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
 {
 	tlb_flushall_shift = 5;
 
@@ -765,7 +764,7 @@ static void __cpuinit cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
 		tlb_flushall_shift = 4;
 }
 
-static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
+static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
 {
 	u32 ebx, eax, ecx, edx;
 	u16 mask = 0xfff;
@@ -820,7 +819,7 @@ static void __cpuinit cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
 	cpu_set_tlb_flushall_shift(c);
 }
 
-static const struct cpu_dev __cpuinitconst amd_cpu_dev = {
+static const struct cpu_dev amd_cpu_dev = {
 	.c_vendor	= "AMD",
 	.c_ident	= { "AuthenticAMD" },
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 159103c0b1f4..fbf6c3bc2400 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -11,7 +11,7 @@
 
 #ifdef CONFIG_X86_OOSTORE
 
-static u32 __cpuinit power2(u32 x)
+static u32 power2(u32 x)
 {
 	u32 s = 1;
 
@@ -25,7 +25,7 @@ static u32 __cpuinit power2(u32 x)
 /*
  * Set up an actual MCR
  */
-static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
+static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
 {
 	u32 lo, hi;
 
@@ -42,7 +42,7 @@ static void __cpuinit centaur_mcr_insert(int reg, u32 base, u32 size, int key)
 *
 * Shortcut: We know you can't put 4Gig of RAM on a winchip
 */
-static u32 __cpuinit ramtop(void)
+static u32 ramtop(void)
 {
 	u32 clip = 0xFFFFFFFFUL;
 	u32 top = 0;
@@ -91,7 +91,7 @@ static u32 __cpuinit ramtop(void)
 /*
  * Compute a set of MCR's to give maximum coverage
 */
-static int __cpuinit centaur_mcr_compute(int nr, int key)
+static int centaur_mcr_compute(int nr, int key)
 {
 	u32 mem = ramtop();
 	u32 root = power2(mem);
@@ -157,7 +157,7 @@ static int __cpuinit centaur_mcr_compute(int nr, int key)
 	return ct;
 }
 
-static void __cpuinit centaur_create_optimal_mcr(void)
+static void centaur_create_optimal_mcr(void)
 {
 	int used;
 	int i;
@@ -181,7 +181,7 @@ static void __cpuinit centaur_create_optimal_mcr(void)
 		wrmsr(MSR_IDT_MCR0+i, 0, 0);
 }
 
-static void __cpuinit winchip2_create_optimal_mcr(void)
+static void winchip2_create_optimal_mcr(void)
 {
 	u32 lo, hi;
 	int used;
@@ -217,7 +217,7 @@ static void __cpuinit winchip2_create_optimal_mcr(void)
 /*
  * Handle the MCR key on the Winchip 2.
  */
-static void __cpuinit winchip2_unprotect_mcr(void)
+static void winchip2_unprotect_mcr(void)
 {
 	u32 lo, hi;
 	u32 key;
@@ -229,7 +229,7 @@ static void __cpuinit winchip2_unprotect_mcr(void)
 	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
 }
 
-static void __cpuinit winchip2_protect_mcr(void)
+static void winchip2_protect_mcr(void)
 {
 	u32 lo, hi;
 
@@ -247,7 +247,7 @@ static void __cpuinit winchip2_protect_mcr(void)
 #define RNG_ENABLED	(1 << 3)
 #define RNG_ENABLE	(1 << 6)	/* MSR_VIA_RNG */
 
-static void __cpuinit init_c3(struct cpuinfo_x86 *c)
+static void init_c3(struct cpuinfo_x86 *c)
 {
 	u32 lo, hi;
 
@@ -318,7 +318,7 @@ enum {
 	EAMD3D		= 1<<20,
 };
 
-static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
+static void early_init_centaur(struct cpuinfo_x86 *c)
 {
 	switch (c->x86) {
 #ifdef CONFIG_X86_32
@@ -337,7 +337,7 @@ static void __cpuinit early_init_centaur(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
+static void init_centaur(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	char *name;
@@ -468,7 +468,7 @@ static void __cpuinit init_centaur(struct cpuinfo_x86 *c)
 #endif
 }
 
-static unsigned int __cpuinit
+static unsigned int
 centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 #ifdef CONFIG_X86_32
@@ -488,7 +488,7 @@ centaur_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 	return size;
 }
 
-static const struct cpu_dev __cpuinitconst centaur_cpu_dev = {
+static const struct cpu_dev centaur_cpu_dev = {
 	.c_vendor	= "Centaur",
 	.c_ident	= { "CentaurHauls" },
 	.c_early_init	= early_init_centaur,
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 548bd039784e..25eb2747b063 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -63,7 +63,7 @@ void __init setup_cpu_local_masks(void)
 	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
 }
 
-static void __cpuinit default_init(struct cpuinfo_x86 *c)
+static void default_init(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_64
 	cpu_detect_cache_sizes(c);
@@ -80,13 +80,13 @@ static void __cpuinit default_init(struct cpuinfo_x86 *c)
 #endif
 }
 
-static const struct cpu_dev __cpuinitconst default_cpu = {
+static const struct cpu_dev default_cpu = {
 	.c_init		= default_init,
 	.c_vendor	= "Unknown",
 	.c_x86_vendor	= X86_VENDOR_UNKNOWN,
 };
 
-static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
+static const struct cpu_dev *this_cpu = &default_cpu;
 
 DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 #ifdef CONFIG_X86_64
@@ -160,8 +160,8 @@ static int __init x86_xsaveopt_setup(char *s)
 __setup("noxsaveopt", x86_xsaveopt_setup);
 
 #ifdef CONFIG_X86_32
-static int cachesize_override __cpuinitdata = -1;
-static int disable_x86_serial_nr __cpuinitdata = 1;
+static int cachesize_override = -1;
+static int disable_x86_serial_nr = 1;
 
 static int __init cachesize_setup(char *str)
 {
@@ -215,12 +215,12 @@ static inline int flag_is_changeable_p(u32 flag)
 }
 
 /* Probe for the CPUID instruction */
-int __cpuinit have_cpuid_p(void)
+int have_cpuid_p(void)
 {
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
 
-static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
 
@@ -298,7 +298,7 @@ struct cpuid_dependent_feature {
 	u32 level;
 };
 
-static const struct cpuid_dependent_feature __cpuinitconst
+static const struct cpuid_dependent_feature
 cpuid_dependent_features[] = {
 	{ X86_FEATURE_MWAIT,		0x00000005 },
 	{ X86_FEATURE_DCA,		0x00000009 },
@@ -306,7 +306,7 @@ cpuid_dependent_features[] = {
 	{ 0, 0 }
 };
 
-static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
+static void filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 {
 	const struct cpuid_dependent_feature *df;
 
@@ -344,7 +344,7 @@ static void __cpuinit filter_cpuid_features(struct cpuinfo_x86 *c, bool warn)
 */
 
 /* Look up CPU names by table lookup. */
-static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
+static const char *table_lookup_model(struct cpuinfo_x86 *c)
 {
 	const struct cpu_model_info *info;
 
@@ -364,8 +364,8 @@ static const char *__cpuinit table_lookup_model(struct cpuinfo_x86 *c)
 	return NULL;		/* Not found */
 }
 
-__u32 cpu_caps_cleared[NCAPINTS] __cpuinitdata;
-__u32 cpu_caps_set[NCAPINTS] __cpuinitdata;
+__u32 cpu_caps_cleared[NCAPINTS];
+__u32 cpu_caps_set[NCAPINTS];
 
 void load_percpu_segment(int cpu)
 {
@@ -394,9 +394,9 @@ void switch_to_new_gdt(int cpu)
 	load_percpu_segment(cpu);
 }
 
-static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {};
+static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 
-static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
+static void get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
@@ -425,7 +425,7 @@ static void __cpuinit get_model_name(struct cpuinfo_x86 *c)
 	}
 }
 
-void __cpuinit cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
+void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ebx, ecx, edx, l2size;
 
@@ -479,7 +479,7 @@ u16 __read_mostly tlb_lld_4m[NR_INFO];
 */
 s8 __read_mostly tlb_flushall_shift = -1;
 
-void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
+void cpu_detect_tlb(struct cpuinfo_x86 *c)
 {
 	if (this_cpu->c_detect_tlb)
 		this_cpu->c_detect_tlb(c);
@@ -493,7 +493,7 @@ void __cpuinit cpu_detect_tlb(struct cpuinfo_x86 *c)
 		tlb_flushall_shift);
 }
 
-void __cpuinit detect_ht(struct cpuinfo_x86 *c)
+void detect_ht(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_HT
 	u32 eax, ebx, ecx, edx;
@@ -544,7 +544,7 @@ out:
 #endif
 }
 
-static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
+static void get_cpu_vendor(struct cpuinfo_x86 *c)
 {
 	char *v = c->x86_vendor_id;
 	int i;
@@ -571,7 +571,7 @@ static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
 	this_cpu = &default_cpu;
 }
 
-void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
+void cpu_detect(struct cpuinfo_x86 *c)
 {
 	/* Get vendor name */
 	cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
@@ -601,7 +601,7 @@ void __cpuinit cpu_detect(struct cpuinfo_x86 *c)
 	}
 }
 
-void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
+void get_cpu_cap(struct cpuinfo_x86 *c)
 {
 	u32 tfms, xlvl;
 	u32 ebx;
@@ -652,7 +652,7 @@ void __cpuinit get_cpu_cap(struct cpuinfo_x86 *c)
 	init_scattered_cpuid_features(c);
 }
 
-static void __cpuinit identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
+static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	int i;
@@ -769,7 +769,7 @@ void __init early_cpu_init(void)
 * unless we can find a reliable way to detect all the broken cases.
 * Enable it explicitly on 64-bit for non-constant inputs of cpu_has().
 */
-static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
+static void detect_nopl(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_X86_32
 	clear_cpu_cap(c, X86_FEATURE_NOPL);
@@ -778,7 +778,7 @@ static void __cpuinit detect_nopl(struct cpuinfo_x86 *c)
 #endif
 }
 
-static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+static void generic_identify(struct cpuinfo_x86 *c)
 {
 	c->extended_cpuid_level = 0;
 
@@ -815,7 +815,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
-static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
+static void identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
@@ -960,7 +960,7 @@ void __init identify_boot_cpu(void)
 	cpu_detect_tlb(&boot_cpu_data);
 }
 
-void __cpuinit identify_secondary_cpu(struct cpuinfo_x86 *c)
+void identify_secondary_cpu(struct cpuinfo_x86 *c)
 {
 	BUG_ON(c == &boot_cpu_data);
 	identify_cpu(c);
@@ -975,14 +975,14 @@ struct msr_range {
 	unsigned	max;
 };
 
-static const struct msr_range msr_range_array[] __cpuinitconst = {
+static const struct msr_range msr_range_array[] = {
 	{ 0x00000000, 0x00000418},
 	{ 0xc0000000, 0xc000040b},
 	{ 0xc0010000, 0xc0010142},
 	{ 0xc0011000, 0xc001103b},
 };
 
-static void __cpuinit __print_cpu_msr(void)
+static void __print_cpu_msr(void)
 {
 	unsigned index_min, index_max;
 	unsigned index;
@@ -1001,7 +1001,7 @@ static void __cpuinit __print_cpu_msr(void)
 	}
 }
 
-static int show_msr __cpuinitdata;
+static int show_msr;
 
 static __init int setup_show_msr(char *arg)
 {
@@ -1022,7 +1022,7 @@ static __init int setup_noclflush(char *arg)
 }
 __setup("noclflush", setup_noclflush);
 
-void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
+void print_cpu_info(struct cpuinfo_x86 *c)
 {
 	const char *vendor = NULL;
 
@@ -1051,7 +1051,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 	print_cpu_msr(c);
 }
 
-void __cpuinit print_cpu_msr(struct cpuinfo_x86 *c)
+void print_cpu_msr(struct cpuinfo_x86 *c)
 {
 	if (c->cpu_index < show_msr)
 		__print_cpu_msr();
@@ -1216,7 +1216,7 @@ static void dbg_restore_debug_regs(void)
 */
 #ifdef CONFIG_X86_64
 
-void __cpuinit cpu_init(void)
+void cpu_init(void)
 {
 	struct orig_ist *oist;
 	struct task_struct *me;
@@ -1315,7 +1315,7 @@ void __cpuinit cpu_init(void)
 
 #else
 
-void __cpuinit cpu_init(void)
+void cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct task_struct *curr = current;
diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c
index 7582f475b163..d0969c75ab54 100644
--- a/arch/x86/kernel/cpu/cyrix.c
+++ b/arch/x86/kernel/cpu/cyrix.c
@@ -15,7 +15,7 @@
 /*
  * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
  */
-static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned char ccr2, ccr3;
 
@@ -44,7 +44,7 @@ static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 	}
 }
 
-static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
+static void do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 {
 	unsigned long flags;
 
@@ -59,25 +59,25 @@ static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
 * Actually since bugs.h doesn't even reference this perhaps someone should
 * fix the documentation ???
 */
-static unsigned char Cx86_dir0_msb __cpuinitdata = 0;
+static unsigned char Cx86_dir0_msb = 0;
 
-static const char __cpuinitconst Cx86_model[][9] = {
+static const char Cx86_model[][9] = {
 	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
 	"M II ", "Unknown"
 };
-static const char __cpuinitconst Cx486_name[][5] = {
+static const char Cx486_name[][5] = {
 	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx",
 	"SRx2", "DRx2"
 };
-static const char __cpuinitconst Cx486S_name[][4] = {
+static const char Cx486S_name[][4] = {
 	"S", "S2", "Se", "S2e"
 };
-static const char __cpuinitconst Cx486D_name[][4] = {
+static const char Cx486D_name[][4] = {
 	"DX", "DX2", "?", "?", "?", "DX4"
 };
-static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
-static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
-static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
+static char Cx86_cb[] = "?.5x Core/Bus Clock";
+static const char cyrix_model_mult1[] = "12??43";
+static const char cyrix_model_mult2[] = "12233445";
 
 /*
 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
@@ -87,7 +87,7 @@ static const char __cpuinitconst cyrix_model_mult2[] = "12233445";
 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
 */
 
-static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
+static void check_cx686_slop(struct cpuinfo_x86 *c)
 {
 	unsigned long flags;
 
@@ -112,7 +112,7 @@ static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
 }
 
 
-static void __cpuinit set_cx86_reorder(void)
+static void set_cx86_reorder(void)
 {
 	u8 ccr3;
 
@@ -127,7 +127,7 @@ static void __cpuinit set_cx86_reorder(void)
 	setCx86(CX86_CCR3, ccr3);
 }
 
-static void __cpuinit set_cx86_memwb(void)
+static void set_cx86_memwb(void)
 {
 	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");
 
@@ -143,7 +143,7 @@ static void __cpuinit set_cx86_memwb(void)
 * Configure later MediaGX and/or Geode processor.
 */
 
-static void __cpuinit geode_configure(void)
+static void geode_configure(void)
 {
 	unsigned long flags;
 	u8 ccr3;
@@ -166,7 +166,7 @@ static void __cpuinit geode_configure(void)
 	local_irq_restore(flags);
 }
 
-static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
+static void early_init_cyrix(struct cpuinfo_x86 *c)
 {
 	unsigned char dir0, dir0_msn, dir1 = 0;
 
@@ -185,7 +185,7 @@ static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
+static void init_cyrix(struct cpuinfo_x86 *c)
 {
 	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
 	char *buf = c->x86_model_id;
@@ -356,7 +356,7 @@ static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
 /*
 * Handle National Semiconductor branded processors
 */
-static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
+static void init_nsc(struct cpuinfo_x86 *c)
 {
 	/*
 	 * There may be GX1 processors in the wild that are branded
@@ -405,7 +405,7 @@ static inline int test_cyrix_52div(void)
 	return (unsigned char) (test >> 8) == 0x02;
 }
 
-static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
+static void cyrix_identify(struct cpuinfo_x86 *c)
 {
 	/* Detect Cyrix with disabled CPUID */
 	if (c->x86 == 4 && test_cyrix_52div()) {
@@ -441,7 +441,7 @@ static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
 	}
 }
 
-static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
+static const struct cpu_dev cyrix_cpu_dev = {
 	.c_vendor	= "Cyrix",
 	.c_ident	= { "CyrixInstead" },
 	.c_early_init	= early_init_cyrix,
@@ -452,7 +452,7 @@ static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
 
 cpu_dev_register(cyrix_cpu_dev);
 
-static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
+static const struct cpu_dev nsc_cpu_dev = {
 	.c_vendor	= "NSC",
 	.c_ident	= { "Geode by NSC" },
 	.c_init		= init_nsc,
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 1e7e84a02eba..87279212d318 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -60,7 +60,7 @@ detect_hypervisor_vendor(void)
 	}
 }
 
-void __cpuinit init_hypervisor(struct cpuinfo_x86 *c)
+void init_hypervisor(struct cpuinfo_x86 *c)
 {
 	if (x86_hyper && x86_hyper->set_cpu_features)
 		x86_hyper->set_cpu_features(c);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 9b0c441c03f5..ec7299566f79 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -26,7 +26,7 @@
 #include <asm/apic.h>
 #endif
 
-static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
+static void early_init_intel(struct cpuinfo_x86 *c)
 {
 	u64 misc_enable;
 
@@ -163,7 +163,7 @@ static void __cpuinit early_init_intel(struct cpuinfo_x86 *c)
 * This is called before we do cpu ident work
 */
 
-int __cpuinit ppro_with_ram_bug(void)
+int ppro_with_ram_bug(void)
 {
 	/* Uses data from early_cpu_detect now */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -176,7 +176,7 @@ int __cpuinit ppro_with_ram_bug(void)
 	return 0;
 }
 
-static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
+static void intel_smp_check(struct cpuinfo_x86 *c)
 {
 	/* calling is from identify_secondary_cpu() ? */
 	if (!c->cpu_index)
@@ -196,7 +196,7 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+static void intel_workarounds(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
 
@@ -275,12 +275,12 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 	intel_smp_check(c);
 }
 #else
-static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
+static void intel_workarounds(struct cpuinfo_x86 *c)
 {
 }
 #endif
 
-static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
+static void srat_detect_node(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_NUMA
 	unsigned node;
@@ -300,7 +300,7 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 /*
 * find out the number of processor cores on the die
 */
-static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
+static int intel_num_cpu_cores(struct cpuinfo_x86 *c)
 {
 	unsigned int eax, ebx, ecx, edx;
 
@@ -315,7 +315,7 @@ static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
 	return 1;
 }
 
-static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
+static void detect_vmx_virtcap(struct cpuinfo_x86 *c)
 {
 	/* Intel VMX MSR indicated features */
 #define X86_VMX_FEATURE_PROC_CTLS_TPR_SHADOW	0x00200000
@@ -353,7 +353,7 @@ static void __cpuinit detect_vmx_virtcap(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit init_intel(struct cpuinfo_x86 *c)
+static void init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
 
@@ -472,7 +472,7 @@ static void __cpuinit init_intel(struct cpuinfo_x86 *c)
 }
 
 #ifdef CONFIG_X86_32
-static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
+static unsigned int intel_size_cache(struct cpuinfo_x86 *c, unsigned int size)
 {
 	/*
 	 * Intel PIII Tualatin. This comes in two flavours.
@@ -506,7 +506,7 @@ static unsigned int __cpuinit intel_size_cache(struct cpuinfo_x86 *c, unsigned i
 
 #define STLB_4K		0x41
 
-static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
+static const struct _tlb_table intel_tlb_table[] = {
 	{ 0x01, TLB_INST_4K,	32,	" TLB_INST 4 KByte pages, 4-way set associative" },
 	{ 0x02, TLB_INST_4M,	2,	" TLB_INST 4 MByte pages, full associative" },
 	{ 0x03, TLB_DATA_4K,	64,	" TLB_DATA 4 KByte pages, 4-way set associative" },
@@ -536,7 +536,7 @@ static const struct _tlb_table intel_tlb_table[] __cpuinitconst = {
 	{ 0x00, 0, 0 }
 };
 
-static void __cpuinit intel_tlb_lookup(const unsigned char desc)
+static void intel_tlb_lookup(const unsigned char desc)
 {
 	unsigned char k;
 	if (desc == 0)
@@ -605,7 +605,7 @@ static void __cpuinit intel_tlb_lookup(const unsigned char desc)
 	}
 }
 
-static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
+static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
 {
 	switch ((c->x86 << 8) + c->x86_model) {
 	case 0x60f: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
@@ -634,7 +634,7 @@ static void __cpuinit intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
+static void intel_detect_tlb(struct cpuinfo_x86 *c)
 {
 	int i, j, n;
 	unsigned int regs[4];
@@ -661,7 +661,7 @@ static void __cpuinit intel_detect_tlb(struct cpuinfo_x86 *c)
 	intel_tlb_flushall_shift_set(c);
 }
 
-static const struct cpu_dev __cpuinitconst intel_cpu_dev = {
+static const struct cpu_dev intel_cpu_dev = {
 	.c_vendor	= "Intel",
 	.c_ident	= { "GenuineIntel" },
 #ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 8dc72dda66fe..1414c90feaba 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -37,7 +37,7 @@ struct _cache_table {
 /* All the cache descriptor types we care about (no TLB or
    trace cache entries) */
 
-static const struct _cache_table __cpuinitconst cache_table[] =
+static const struct _cache_table cache_table[] =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
@@ -203,7 +203,7 @@ union l3_cache {
 	unsigned val;
 };
 
-static const unsigned short __cpuinitconst assocs[] = {
+static const unsigned short assocs[] = {
 	[1] = 1,
 	[2] = 2,
 	[4] = 4,
@@ -217,10 +217,10 @@ static const unsigned short __cpuinitconst assocs[] = {
 	[0xf] = 0xffff /* fully associative - no way to show this currently */
 };
 
-static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
-static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
+static const unsigned char levels[] = { 1, 1, 2, 3 };
+static const unsigned char types[] = { 1, 2, 3, 3 };
 
-static void __cpuinit
+static void
 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
 		     union _cpuid4_leaf_ebx *ebx,
 		     union _cpuid4_leaf_ecx *ecx)
@@ -302,7 +302,7 @@ struct _cache_attr {
 /*
 * L3 cache descriptors
 */
-static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
+static void amd_calc_l3_indices(struct amd_northbridge *nb)
 {
 	struct amd_l3_cache *l3 = &nb->l3_cache;
 	unsigned int sc0, sc1, sc2, sc3;
@@ -325,7 +325,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb)
 	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
 }
 
-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
+static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
 {
 	int node;
 
@@ -528,8 +528,7 @@ static struct _cache_attr subcaches =
 #endif /* CONFIG_AMD_NB && CONFIG_SYSFS */
 
 static int
-__cpuinit cpuid4_cache_lookup_regs(int index,
-				   struct _cpuid4_info_regs *this_leaf)
+cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
 {
 	union _cpuid4_leaf_eax eax;
 	union _cpuid4_leaf_ebx ebx;
@@ -560,7 +559,7 @@ __cpuinit cpuid4_cache_lookup_regs(int index,
 	return 0;
 }
 
-static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
+static int find_num_cache_leaves(struct cpuinfo_x86 *c)
 {
 	unsigned int eax, ebx, ecx, edx, op;
 	union _cpuid4_leaf_eax cache_eax;
@@ -580,7 +579,7 @@ static int __cpuinit find_num_cache_leaves(struct cpuinfo_x86 *c)
 	return i;
 }
 
-void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
+void init_amd_cacheinfo(struct cpuinfo_x86 *c)
 {
 
 	if (cpu_has_topoext) {
@@ -593,7 +592,7 @@ void __cpuinit init_amd_cacheinfo(struct cpuinfo_x86 *c)
 	}
 }
 
-unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
+unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
 	/* Cache sizes */
 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
@@ -744,7 +743,7 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info);
 
 #ifdef CONFIG_SMP
 
-static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
+static int cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf;
 	int i, sibling;
@@ -793,7 +792,7 @@ static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index)
 	return 1;
 }
 
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	unsigned long num_threads_sharing;
@@ -828,7 +827,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 		}
 	}
 }
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
 {
 	struct _cpuid4_info *this_leaf, *sibling_leaf;
 	int sibling;
@@ -841,16 +840,16 @@ static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
 	}
 }
 #else
-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
+static void cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
 }
 
-static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+static void cache_remove_shared_cpu_map(unsigned int cpu, int index)
 {
 }
 #endif
 
-static void __cpuinit free_cache_attributes(unsigned int cpu)
+static void free_cache_attributes(unsigned int cpu)
 {
 	int i;
 
@@ -861,7 +860,7 @@ static void __cpuinit free_cache_attributes(unsigned int cpu)
 	per_cpu(ici_cpuid4_info, cpu) = NULL;
 }
 
-static void __cpuinit get_cpu_leaves(void *_retval)
+static void get_cpu_leaves(void *_retval)
 {
 	int j, *retval = _retval, cpu = smp_processor_id();
 
@@ -881,7 +880,7 @@ static void __cpuinit get_cpu_leaves(void *_retval)
 	}
 }
 
-static int __cpuinit detect_cache_attributes(unsigned int cpu)
+static int detect_cache_attributes(unsigned int cpu)
 {
 	int retval;
 
@@ -1015,7 +1014,7 @@ static struct attribute *default_attrs[] = {
 };
 
 #ifdef CONFIG_AMD_NB
-static struct attribute ** __cpuinit amd_l3_attrs(void)
+static struct attribute **amd_l3_attrs(void)
 {
 	static struct attribute **attrs;
 	int n;
@@ -1091,7 +1090,7 @@ static struct kobj_type ktype_percpu_entry = {
 	.sysfs_ops	= &sysfs_ops,
 };
 
-static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
+static void cpuid4_cache_sysfs_exit(unsigned int cpu)
 {
 	kfree(per_cpu(ici_cache_kobject, cpu));
 	kfree(per_cpu(ici_index_kobject, cpu));
@@ -1100,7 +1099,7 @@ static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
 	free_cache_attributes(cpu);
1101} 1100}
1102 1101
1103static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu) 1102static int cpuid4_cache_sysfs_init(unsigned int cpu)
1104{ 1103{
1105 int err; 1104 int err;
1106 1105
@@ -1132,7 +1131,7 @@ err_out:
1132static DECLARE_BITMAP(cache_dev_map, NR_CPUS); 1131static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
1133 1132
1134/* Add/Remove cache interface for CPU device */ 1133/* Add/Remove cache interface for CPU device */
1135static int __cpuinit cache_add_dev(struct device *dev) 1134static int cache_add_dev(struct device *dev)
1136{ 1135{
1137 unsigned int cpu = dev->id; 1136 unsigned int cpu = dev->id;
1138 unsigned long i, j; 1137 unsigned long i, j;
@@ -1183,7 +1182,7 @@ static int __cpuinit cache_add_dev(struct device *dev)
1183 return 0; 1182 return 0;
1184} 1183}
1185 1184
1186static void __cpuinit cache_remove_dev(struct device *dev) 1185static void cache_remove_dev(struct device *dev)
1187{ 1186{
1188 unsigned int cpu = dev->id; 1187 unsigned int cpu = dev->id;
1189 unsigned long i; 1188 unsigned long i;
@@ -1200,8 +1199,8 @@ static void __cpuinit cache_remove_dev(struct device *dev)
1200 cpuid4_cache_sysfs_exit(cpu); 1199 cpuid4_cache_sysfs_exit(cpu);
1201} 1200}
1202 1201
1203static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb, 1202static int cacheinfo_cpu_callback(struct notifier_block *nfb,
1204 unsigned long action, void *hcpu) 1203 unsigned long action, void *hcpu)
1205{ 1204{
1206 unsigned int cpu = (unsigned long)hcpu; 1205 unsigned int cpu = (unsigned long)hcpu;
1207 struct device *dev; 1206 struct device *dev;
@@ -1220,7 +1219,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
1220 return NOTIFY_OK; 1219 return NOTIFY_OK;
1221} 1220}
1222 1221
1223static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = { 1222static struct notifier_block cacheinfo_cpu_notifier = {
1224 .notifier_call = cacheinfo_cpu_callback, 1223 .notifier_call = cacheinfo_cpu_callback,
1225}; 1224};
1226 1225
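The cacheinfo hunks above end with the shape that repeats through the rest of this patch: a plain callback plus a notifier_block that used to carry __cpuinitdata. As a reference point, here is a minimal, self-contained sketch of that hotplug-notifier pattern as it looks once the annotations are gone; every name in it is hypothetical, not taken from the patch:

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/printk.h>

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* mask off the _FROZEN variants used during suspend/resume */
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		pr_info("example: cpu %u online\n", cpu);
		break;
	case CPU_DEAD:
		pr_info("example: cpu %u dead\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

/* was "static struct notifier_block __cpuinitdata ..." before this patch */
static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

Registration is untouched by this patch: an __init routine calls register_hotcpu_notifier(&example_cpu_notifier) and the callback then fires on every hotplug transition.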
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index bf49cdbb010f..87a65c939bcd 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1363,7 +1363,7 @@ int mce_notify_irq(void)
 }
 EXPORT_SYMBOL_GPL(mce_notify_irq);
 
-static int __cpuinit __mcheck_cpu_mce_banks_init(void)
+static int __mcheck_cpu_mce_banks_init(void)
 {
 	int i;
 	u8 num_banks = mca_cfg.banks;
@@ -1384,7 +1384,7 @@ static int __cpuinit __mcheck_cpu_mce_banks_init(void)
 /*
  * Initialize Machine Checks for a CPU.
  */
-static int __cpuinit __mcheck_cpu_cap_init(void)
+static int __mcheck_cpu_cap_init(void)
 {
 	unsigned b;
 	u64 cap;
@@ -1483,7 +1483,7 @@ static void quirk_sandybridge_ifu(int bank, struct mce *m, struct pt_regs *regs)
 }
 
 /* Add per CPU specific workarounds here */
-static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
+static int __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 {
 	struct mca_config *cfg = &mca_cfg;
 
@@ -1593,7 +1593,7 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
 	return 0;
 }
 
-static int __cpuinit __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
+static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
 {
 	if (c->x86 != 5)
 		return 0;
@@ -1664,7 +1664,7 @@ void (*machine_check_vector)(struct pt_regs *, long error_code) =
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off:
  */
-void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
+void mcheck_cpu_init(struct cpuinfo_x86 *c)
 {
 	if (mca_cfg.disabled)
 		return;
@@ -2082,7 +2082,6 @@ static struct bus_type mce_subsys = {
 
 DEFINE_PER_CPU(struct device *, mce_device);
 
-__cpuinitdata
 void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
 static inline struct mce_bank *attr_to_bank(struct device_attribute *attr)
@@ -2228,7 +2227,7 @@ static void mce_device_release(struct device *dev)
 }
 
 /* Per cpu device init. All of the cpus still share the same ctrl bank: */
-static __cpuinit int mce_device_create(unsigned int cpu)
+static int mce_device_create(unsigned int cpu)
 {
 	struct device *dev;
 	int err;
@@ -2274,7 +2273,7 @@ error:
 	return err;
 }
 
-static __cpuinit void mce_device_remove(unsigned int cpu)
+static void mce_device_remove(unsigned int cpu)
 {
 	struct device *dev = per_cpu(mce_device, cpu);
 	int i;
@@ -2294,7 +2293,7 @@ static __cpuinit void mce_device_remove(unsigned int cpu)
 }
 
 /* Make sure there are no machine checks on offlined CPUs. */
-static void __cpuinit mce_disable_cpu(void *h)
+static void mce_disable_cpu(void *h)
 {
 	unsigned long action = *(unsigned long *)h;
 	int i;
@@ -2312,7 +2311,7 @@ static void __cpuinit mce_disable_cpu(void *h)
 	}
 }
 
-static void __cpuinit mce_reenable_cpu(void *h)
+static void mce_reenable_cpu(void *h)
 {
 	unsigned long action = *(unsigned long *)h;
 	int i;
@@ -2331,7 +2330,7 @@ static void __cpuinit mce_reenable_cpu(void *h)
 }
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static int __cpuinit
+static int
 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
@@ -2367,7 +2366,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
+static struct notifier_block mce_cpu_notifier = {
 	.notifier_call = mce_cpu_callback,
 };
 
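For orientation, the annotations being stripped above were defined in include/linux/init.h. In kernels of this vintage the definitions were roughly the following; this is reproduced from memory, so treat the exact attribute spelling as an approximation rather than a quotation:

/* Approximate pre-removal definitions (include/linux/init.h, ~v3.9) */
#define __cpuinit	__section(.cpuinit.text) __cold notrace
#define __cpuinitdata	__section(.cpuinit.data)
#define __cpuinitconst	__constsection(.cpuinit.rodata)

/* assembly-side section switch, deleted from head_32.S further below */
#define __CPUINIT	.section ".cpuinit.text", "ax"

Whether the .cpuinit.* sections were retained or discarded after boot was a linker-script decision keyed off CONFIG_HOTPLUG_CPU, and that conditional placement is precisely what made the annotations easy to misuse.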
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 9cb52767999a..603df4f74640 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -458,10 +458,8 @@ static struct kobj_type threshold_ktype = {
 	.default_attrs		= default_attrs,
 };
 
-static __cpuinit int allocate_threshold_blocks(unsigned int cpu,
-					       unsigned int bank,
-					       unsigned int block,
-					       u32 address)
+static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
+				     unsigned int block, u32 address)
 {
 	struct threshold_block *b = NULL;
 	u32 low, high;
@@ -543,7 +541,7 @@ out_free:
 	return err;
 }
 
-static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
+static int __threshold_add_blocks(struct threshold_bank *b)
 {
 	struct list_head *head = &b->blocks->miscj;
 	struct threshold_block *pos = NULL;
@@ -567,7 +565,7 @@ static __cpuinit int __threshold_add_blocks(struct threshold_bank *b)
 	return err;
 }
 
-static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
+static int threshold_create_bank(unsigned int cpu, unsigned int bank)
 {
 	struct device *dev = per_cpu(mce_device, cpu);
 	struct amd_northbridge *nb = NULL;
@@ -632,7 +630,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 }
 
 /* create dir/files for all valid threshold banks */
-static __cpuinit int threshold_create_device(unsigned int cpu)
+static int threshold_create_device(unsigned int cpu)
 {
 	unsigned int bank;
 	struct threshold_bank **bp;
@@ -736,7 +734,7 @@ static void threshold_remove_device(unsigned int cpu)
 }
 
 /* get notified when a cpu comes on/off */
-static void __cpuinit
+static void
 amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
 {
 	switch (action) {
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 41e8e00a6637..3eec7de76efb 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -240,8 +240,7 @@ __setup("int_pln_enable", int_pln_enable_setup);
 
 #ifdef CONFIG_SYSFS
 /* Add/Remove thermal_throttle interface for CPU device: */
-static __cpuinit int thermal_throttle_add_dev(struct device *dev,
-					      unsigned int cpu)
+static int thermal_throttle_add_dev(struct device *dev, unsigned int cpu)
 {
 	int err;
 	struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -267,7 +266,7 @@ static __cpuinit int thermal_throttle_add_dev(struct device *dev,
 	return err;
 }
 
-static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
+static void thermal_throttle_remove_dev(struct device *dev)
 {
 	sysfs_remove_group(&dev->kobj, &thermal_attr_group);
 }
@@ -276,7 +275,7 @@ static __cpuinit void thermal_throttle_remove_dev(struct device *dev)
 static DEFINE_MUTEX(therm_cpu_lock);
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
-static __cpuinit int
+static int
 thermal_throttle_cpu_callback(struct notifier_block *nfb,
 			      unsigned long action,
 			      void *hcpu)
@@ -307,7 +306,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
 	return notifier_from_errno(err);
 }
 
-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
+static struct notifier_block thermal_throttle_cpu_notifier =
 {
 	.notifier_call = thermal_throttle_cpu_callback,
 };
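The add/remove pair above is the standard sysfs attribute-group idiom: one call creates a named subdirectory full of attribute files under the CPU device, one call tears it down. A hedged sketch of the same pairing with a hypothetical group (the real thermal_attr_group lives elsewhere in therm_throt.c):

#include <linux/device.h>
#include <linux/sysfs.h>

static struct attribute *example_attrs[] = {
	/* &dev_attr_core_throttle_count.attr, ... */
	NULL,	/* the array must be NULL-terminated */
};

static const struct attribute_group example_attr_group = {
	.attrs	= example_attrs,
	.name	= "thermal_throttle",	/* subdirectory under the device */
};

static int example_add_dev(struct device *dev)
{
	/* creates /sys/devices/system/cpu/cpuN/thermal_throttle/ */
	return sysfs_create_group(&dev->kobj, &example_attr_group);
}

static void example_remove_dev(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &example_attr_group);
}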
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 9e581c5cf6d0..a7c7305030cc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1295,7 +1295,7 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
 struct event_constraint emptyconstraint;
 struct event_constraint unconstrained;
 
-static int __cpuinit
+static int
 x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
index 5f0581e713c2..e09f0bfb7b8f 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c
@@ -851,7 +851,7 @@ static void clear_APIC_ibs(void *dummy)
 	setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
 }
 
-static int __cpuinit
+static int
 perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index c0c661adf03e..754291adec33 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -288,13 +288,13 @@ static struct pmu amd_l2_pmu = {
 	.read		= amd_uncore_read,
 };
 
-static struct amd_uncore * __cpuinit amd_uncore_alloc(unsigned int cpu)
+static struct amd_uncore *amd_uncore_alloc(unsigned int cpu)
 {
 	return kzalloc_node(sizeof(struct amd_uncore), GFP_KERNEL,
 			cpu_to_node(cpu));
 }
 
-static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
+static void amd_uncore_cpu_up_prepare(unsigned int cpu)
 {
 	struct amd_uncore *uncore;
 
@@ -322,8 +322,8 @@ static void __cpuinit amd_uncore_cpu_up_prepare(unsigned int cpu)
 }
 
 static struct amd_uncore *
-__cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
+amd_uncore_find_online_sibling(struct amd_uncore *this,
 				  struct amd_uncore * __percpu *uncores)
 {
 	unsigned int cpu;
 	struct amd_uncore *that;
@@ -348,7 +348,7 @@ __cpuinit amd_uncore_find_online_sibling(struct amd_uncore *this,
 	return this;
 }
 
-static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
+static void amd_uncore_cpu_starting(unsigned int cpu)
 {
 	unsigned int eax, ebx, ecx, edx;
 	struct amd_uncore *uncore;
@@ -376,8 +376,8 @@ static void __cpuinit amd_uncore_cpu_starting(unsigned int cpu)
 	}
 }
 
-static void __cpuinit uncore_online(unsigned int cpu,
+static void uncore_online(unsigned int cpu,
 			    struct amd_uncore * __percpu *uncores)
 {
 	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
 
@@ -388,7 +388,7 @@ static void __cpuinit uncore_online(unsigned int cpu,
 	cpumask_set_cpu(cpu, uncore->active_mask);
 }
 
-static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
+static void amd_uncore_cpu_online(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_online(cpu, amd_uncore_nb);
@@ -397,8 +397,8 @@ static void __cpuinit amd_uncore_cpu_online(unsigned int cpu)
 		uncore_online(cpu, amd_uncore_l2);
 }
 
-static void __cpuinit uncore_down_prepare(unsigned int cpu,
+static void uncore_down_prepare(unsigned int cpu,
 				   struct amd_uncore * __percpu *uncores)
 {
 	unsigned int i;
 	struct amd_uncore *this = *per_cpu_ptr(uncores, cpu);
@@ -423,7 +423,7 @@ static void __cpuinit uncore_down_prepare(unsigned int cpu,
 	}
 }
 
-static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
+static void amd_uncore_cpu_down_prepare(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_down_prepare(cpu, amd_uncore_nb);
@@ -432,8 +432,7 @@ static void __cpuinit amd_uncore_cpu_down_prepare(unsigned int cpu)
 		uncore_down_prepare(cpu, amd_uncore_l2);
 }
 
-static void __cpuinit uncore_dead(unsigned int cpu,
-				  struct amd_uncore * __percpu *uncores)
+static void uncore_dead(unsigned int cpu, struct amd_uncore * __percpu *uncores)
 {
 	struct amd_uncore *uncore = *per_cpu_ptr(uncores, cpu);
 
@@ -445,7 +444,7 @@ static void __cpuinit uncore_dead(unsigned int cpu,
 	*per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
 }
 
-static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
+static void amd_uncore_cpu_dead(unsigned int cpu)
 {
 	if (amd_uncore_nb)
 		uncore_dead(cpu, amd_uncore_nb);
@@ -454,7 +453,7 @@ static void __cpuinit amd_uncore_cpu_dead(unsigned int cpu)
 		uncore_dead(cpu, amd_uncore_l2);
 }
 
-static int __cpuinit
+static int
 amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
 			void *hcpu)
 {
@@ -489,7 +488,7 @@ amd_uncore_cpu_notifier(struct notifier_block *self, unsigned long action,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block amd_uncore_cpu_notifier_block __cpuinitdata = {
+static struct notifier_block amd_uncore_cpu_notifier_block = {
 	.notifier_call = amd_uncore_cpu_notifier,
 	.priority = CPU_PRI_PERF + 1,
 };
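amd_uncore_alloc() above is a one-liner worth pausing on: per-CPU bookkeeping is allocated NUMA-local to the CPU it will serve, so the hot path never crosses a node boundary to reach its own state. A minimal sketch of the same pattern with a hypothetical structure:

#include <linux/slab.h>
#include <linux/topology.h>

struct example_uncore {
	int refcnt;
	/* counters, masks, ... */
};

static struct example_uncore *example_uncore_alloc(unsigned int cpu)
{
	/* zeroed allocation placed on the memory node backing @cpu */
	return kzalloc_node(sizeof(struct example_uncore), GFP_KERNEL,
			    cpu_to_node(cpu));
}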
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 9dd99751ccf9..cad791dbde95 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -3297,7 +3297,7 @@ static void __init uncore_pci_exit(void)
 /* CPU hot plug/unplug are serialized by cpu_add_remove_lock mutex */
 static LIST_HEAD(boxes_to_free);
 
-static void __cpuinit uncore_kfree_boxes(void)
+static void uncore_kfree_boxes(void)
 {
 	struct intel_uncore_box *box;
 
@@ -3309,7 +3309,7 @@ static void __cpuinit uncore_kfree_boxes(void)
 	}
 }
 
-static void __cpuinit uncore_cpu_dying(int cpu)
+static void uncore_cpu_dying(int cpu)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
@@ -3328,7 +3328,7 @@ static void __cpuinit uncore_cpu_dying(int cpu)
 	}
 }
 
-static int __cpuinit uncore_cpu_starting(int cpu)
+static int uncore_cpu_starting(int cpu)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
@@ -3371,7 +3371,7 @@ static int __cpuinit uncore_cpu_starting(int cpu)
 	return 0;
 }
 
-static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
+static int uncore_cpu_prepare(int cpu, int phys_id)
 {
 	struct intel_uncore_type *type;
 	struct intel_uncore_pmu *pmu;
@@ -3397,7 +3397,7 @@ static int __cpuinit uncore_cpu_prepare(int cpu, int phys_id)
 	return 0;
 }
 
-static void __cpuinit
+static void
 uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_cpu)
 {
 	struct intel_uncore_type *type;
@@ -3435,7 +3435,7 @@ uncore_change_context(struct intel_uncore_type **uncores, int old_cpu, int new_c
 	}
 }
 
-static void __cpuinit uncore_event_exit_cpu(int cpu)
+static void uncore_event_exit_cpu(int cpu)
 {
 	int i, phys_id, target;
 
@@ -3463,7 +3463,7 @@ static void __cpuinit uncore_event_exit_cpu(int cpu)
 	uncore_change_context(pci_uncores, cpu, target);
 }
 
-static void __cpuinit uncore_event_init_cpu(int cpu)
+static void uncore_event_init_cpu(int cpu)
 {
 	int i, phys_id;
 
@@ -3479,8 +3479,8 @@ static void __cpuinit uncore_event_init_cpu(int cpu)
 	uncore_change_context(pci_uncores, -1, cpu);
 }
 
-static int
- __cpuinit uncore_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
+static int uncore_cpu_notifier(struct notifier_block *self,
+			       unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
 
@@ -3520,7 +3520,7 @@ static int
 	return NOTIFY_OK;
 }
 
-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
+static struct notifier_block uncore_cpu_nb = {
 	.notifier_call = uncore_cpu_notifier,
 	/*
 	 * to migrate uncore events, our notifier should be executed
diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c
index feca286c2bb4..88db010845cb 100644
--- a/arch/x86/kernel/cpu/rdrand.c
+++ b/arch/x86/kernel/cpu/rdrand.c
@@ -52,7 +52,7 @@ static inline int rdrand_long(unsigned long *v)
  */
 #define RESEED_LOOP ((512*128)/sizeof(unsigned long))
 
-void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c)
+void x86_init_rdrand(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_ARCH_RANDOM
 	unsigned long tmp;
diff --git a/arch/x86/kernel/cpu/scattered.c b/arch/x86/kernel/cpu/scattered.c
index d92b5dad15dd..f2cc63e9cf08 100644
--- a/arch/x86/kernel/cpu/scattered.c
+++ b/arch/x86/kernel/cpu/scattered.c
@@ -24,13 +24,13 @@ enum cpuid_regs {
 	CR_EBX
 };
 
-void __cpuinit init_scattered_cpuid_features(struct cpuinfo_x86 *c)
+void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 {
 	u32 max_level;
 	u32 regs[4];
 	const struct cpuid_bit *cb;
 
-	static const struct cpuid_bit __cpuinitconst cpuid_bits[] = {
+	static const struct cpuid_bit cpuid_bits[] = {
 		{ X86_FEATURE_DTHERM,	CR_EAX, 0, 0x00000006, 0 },
 		{ X86_FEATURE_IDA,	CR_EAX, 1, 0x00000006, 0 },
 		{ X86_FEATURE_ARAT,	CR_EAX, 2, 0x00000006, 0 },
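Each row of cpuid_bits[] above names a feature flag, the CPUID output register, a bit position, the leaf, and the sub-leaf. Unrolling the last visible row by hand shows what init_scattered_cpuid_features() effectively does per entry; this is a sketch, and the helper name is invented:

#include <asm/processor.h>

/* X86_FEATURE_ARAT: CPUID leaf 0x6, sub-leaf 0, EAX bit 2 */
static void example_detect_arat(struct cpuinfo_x86 *c)
{
	u32 eax, ebx, ecx, edx;

	cpuid_count(0x00000006, 0, &eax, &ebx, &ecx, &edx);
	if (eax & (1 << 2))
		set_cpu_cap(c, X86_FEATURE_ARAT);
}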
diff --git a/arch/x86/kernel/cpu/topology.c b/arch/x86/kernel/cpu/topology.c
index 4397e987a1cf..4c60eaf0571c 100644
--- a/arch/x86/kernel/cpu/topology.c
+++ b/arch/x86/kernel/cpu/topology.c
@@ -26,7 +26,7 @@
  * exists, use it for populating initial_apicid and cpu topology
  * detection.
  */
-void __cpuinit detect_extended_topology(struct cpuinfo_x86 *c)
+void detect_extended_topology(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	unsigned int eax, ebx, ecx, edx, sub_index;
diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c
index 28000743bbb0..aa0430d69b90 100644
--- a/arch/x86/kernel/cpu/transmeta.c
+++ b/arch/x86/kernel/cpu/transmeta.c
@@ -5,7 +5,7 @@
 #include <asm/msr.h>
 #include "cpu.h"
 
-static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
+static void early_init_transmeta(struct cpuinfo_x86 *c)
 {
 	u32 xlvl;
 
@@ -17,7 +17,7 @@ static void __cpuinit early_init_transmeta(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
+static void init_transmeta(struct cpuinfo_x86 *c)
 {
 	unsigned int cap_mask, uk, max, dummy;
 	unsigned int cms_rev1, cms_rev2;
@@ -98,7 +98,7 @@ static void __cpuinit init_transmeta(struct cpuinfo_x86 *c)
 #endif
 }
 
-static const struct cpu_dev __cpuinitconst transmeta_cpu_dev = {
+static const struct cpu_dev transmeta_cpu_dev = {
 	.c_vendor	= "Transmeta",
 	.c_ident	= { "GenuineTMx86", "TransmetaCPU" },
 	.c_early_init	= early_init_transmeta,
diff --git a/arch/x86/kernel/cpu/umc.c b/arch/x86/kernel/cpu/umc.c
index fd2c37bf7acb..202759a14121 100644
--- a/arch/x86/kernel/cpu/umc.c
+++ b/arch/x86/kernel/cpu/umc.c
@@ -8,7 +8,7 @@
  * so no special init takes place.
  */
 
-static const struct cpu_dev __cpuinitconst umc_cpu_dev = {
+static const struct cpu_dev umc_cpu_dev = {
	.c_vendor	= "UMC",
	.c_ident	= { "UMC UMC UMC" },
	.c_models = {
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 03a36321ec54..7076878404ec 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -122,7 +122,7 @@ static bool __init vmware_platform(void)
  * so that the kernel could just trust the hypervisor with providing a
  * reliable virtual TSC that is suitable for timekeeping.
  */
-static void __cpuinit vmware_set_cpu_features(struct cpuinfo_x86 *c)
+static void vmware_set_cpu_features(struct cpuinfo_x86 *c)
 {
 	set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
 	set_cpu_cap(c, X86_FEATURE_TSC_RELIABLE);
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 1e4dbcfe6d31..7d9481c743f8 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -137,7 +137,7 @@ static const struct file_operations cpuid_fops = {
 	.open = cpuid_open,
 };
 
-static __cpuinit int cpuid_device_create(int cpu)
+static int cpuid_device_create(int cpu)
 {
 	struct device *dev;
 
@@ -151,9 +151,8 @@ static void cpuid_device_destroy(int cpu)
 	device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
 }
 
-static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
-					      unsigned long action,
-					      void *hcpu)
+static int cpuid_class_cpu_callback(struct notifier_block *nfb,
+				    unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	int err = 0;
diff --git a/arch/x86/kernel/devicetree.c b/arch/x86/kernel/devicetree.c
index 4934890e4db2..69eb2fa25494 100644
--- a/arch/x86/kernel/devicetree.c
+++ b/arch/x86/kernel/devicetree.c
@@ -133,7 +133,7 @@ static void x86_of_pci_irq_disable(struct pci_dev *dev)
 {
 }
 
-void __cpuinit x86_of_pci_init(void)
+void x86_of_pci_init(void)
 {
 	pcibios_enable_irq = x86_of_pci_irq_enable;
 	pcibios_disable_irq = x86_of_pci_irq_disable;
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index e65ddc62e113..5dd87a89f011 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -292,7 +292,6 @@ ENDPROC(start_cpu0)
  * If cpu hotplug is not supported then this code can go in init section
  * which will be freed later
  */
-__CPUINIT
 ENTRY(startup_32_smp)
 	cld
 	movl $(__BOOT_DS),%eax
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index b627746f6b1a..202d24f0f7e7 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -108,9 +108,9 @@ EXPORT_SYMBOL(unlazy_fpu);
 unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
 unsigned int xstate_size;
 EXPORT_SYMBOL_GPL(xstate_size);
-static struct i387_fxsave_struct fx_scratch __cpuinitdata;
+static struct i387_fxsave_struct fx_scratch;
 
-static void __cpuinit mxcsr_feature_mask_init(void)
+static void mxcsr_feature_mask_init(void)
 {
 	unsigned long mask = 0;
 
@@ -124,7 +124,7 @@ static void __cpuinit mxcsr_feature_mask_init(void)
 	mxcsr_feature_mask &= mask;
 }
 
-static void __cpuinit init_thread_xstate(void)
+static void init_thread_xstate(void)
 {
 	/*
 	 * Note that xstate_size might be overwriten later during
@@ -153,7 +153,7 @@ static void __cpuinit init_thread_xstate(void)
  * into all processes.
  */
 
-void __cpuinit fpu_init(void)
+void fpu_init(void)
 {
 	unsigned long cr0;
 	unsigned long cr4_mask = 0;
@@ -608,7 +608,7 @@ static int __init no_387(char *s)
 
 __setup("no387", no_387);
 
-void __cpuinit fpu_detect(struct cpuinfo_x86 *c)
+void fpu_detect(struct cpuinfo_x86 *c)
 {
 	unsigned long cr0;
 	u16 fsw, fcw;
diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
index 344faf8d0d62..4186755f1d7c 100644
--- a/arch/x86/kernel/irq_32.c
+++ b/arch/x86/kernel/irq_32.c
@@ -119,7 +119,7 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
 /*
  * allocate per-cpu stacks for hardirq and for softirq processing
  */
-void __cpuinit irq_ctx_init(int cpu)
+void irq_ctx_init(int cpu)
 {
 	union irq_ctx *irqctx;
 
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index cd6d9a5a42f6..a96d32cc55b8 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -320,7 +320,7 @@ static void kvm_guest_apic_eoi_write(u32 reg, u32 val)
 	apic_write(APIC_EOI, APIC_EOI_ACK);
 }
 
-void __cpuinit kvm_guest_cpu_init(void)
+void kvm_guest_cpu_init(void)
 {
 	if (!kvm_para_available())
 		return;
@@ -421,7 +421,7 @@ static void __init kvm_smp_prepare_boot_cpu(void)
 	native_smp_prepare_boot_cpu();
 }
 
-static void __cpuinit kvm_guest_cpu_online(void *dummy)
+static void kvm_guest_cpu_online(void *dummy)
 {
 	kvm_guest_cpu_init();
 }
@@ -435,8 +435,8 @@ static void kvm_guest_cpu_offline(void *dummy)
 	apf_task_wake_all();
 }
 
-static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
-				    unsigned long action, void *hcpu)
+static int kvm_cpu_notify(struct notifier_block *self, unsigned long action,
+			  void *hcpu)
 {
 	int cpu = (unsigned long)hcpu;
 	switch (action) {
@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
+static struct notifier_block kvm_cpu_notifier = {
	.notifier_call  = kvm_cpu_notify,
 };
 #endif
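kvm_guest_cpu_online() above takes a dummy void * argument because it is invoked on the target CPU through a cross-call from the hotplug path. A hedged sketch of that dispatch shape, with hypothetical names rather than the exact kvm.c code:

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>

static void example_cpu_online(void *dummy)
{
	/* runs on the CPU that just came online */
}

static int example_cpu_notify(struct notifier_block *self,
			      unsigned long action, void *hcpu)
{
	int cpu = (unsigned long)hcpu;

	if (action == CPU_ONLINE)
		/* wait == 0: queue the call on @cpu and return immediately */
		smp_call_function_single(cpu, example_cpu_online, NULL, 0);

	return NOTIFY_OK;
}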
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 1f354f4b602b..1570e0741344 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -182,7 +182,7 @@ static void kvm_restore_sched_clock_state(void)
 }
 
 #ifdef CONFIG_X86_LOCAL_APIC
-static void __cpuinit kvm_setup_secondary_clock(void)
+static void kvm_setup_secondary_clock(void)
 {
 	/*
 	 * Now that the first cpu already had this clocksource initialized,
diff --git a/arch/x86/kernel/microcode_amd_early.c b/arch/x86/kernel/microcode_amd_early.c
index 1ac6e9aee766..1d14ffee5749 100644
--- a/arch/x86/kernel/microcode_amd_early.c
+++ b/arch/x86/kernel/microcode_amd_early.c
@@ -82,7 +82,7 @@ static struct cpio_data __init find_ucode_in_initrd(void)
  * load_microcode_amd() to save equivalent cpu table and microcode patches in
  * kernel heap memory.
  */
-static void __cpuinit apply_ucode_in_initrd(void *ucode, size_t size)
+static void apply_ucode_in_initrd(void *ucode, size_t size)
 {
 	struct equiv_cpu_entry *eq;
 	u32 *header;
@@ -206,7 +206,7 @@ u8 amd_bsp_mpb[MPB_MAX_SIZE];
  * save_microcode_in_initrd_amd() BSP's patch is copied to amd_bsp_mpb, which
  * is used upon resume from suspend.
  */
-void __cpuinit load_ucode_amd_ap(void)
+void load_ucode_amd_ap(void)
 {
 	struct microcode_amd *mc;
 	unsigned long *initrd;
@@ -238,7 +238,7 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
 	uci->cpu_sig.sig = cpuid_eax(0x00000001);
 }
 #else
-static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
+static void collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
 						 struct ucode_cpu_info *uci)
 {
 	u32 rev, eax;
@@ -252,7 +252,7 @@ static void __cpuinit collect_cpu_info_amd_early(struct cpuinfo_x86 *c,
 	c->x86 = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
 }
 
-void __cpuinit load_ucode_amd_ap(void)
+void load_ucode_amd_ap(void)
 {
 	unsigned int cpu = smp_processor_id();
 
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index 22db92bbdf1a..15c987698b0f 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -468,7 +468,7 @@ static struct syscore_ops mc_syscore_ops = {
 	.resume			= mc_bp_resume,
 };
 
-static __cpuinit int
+static int
 mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
diff --git a/arch/x86/kernel/microcode_core_early.c b/arch/x86/kernel/microcode_core_early.c
index 86119f63db0c..be7f8514f577 100644
--- a/arch/x86/kernel/microcode_core_early.c
+++ b/arch/x86/kernel/microcode_core_early.c
@@ -41,7 +41,7 @@
  *
  * x86_vendor() gets vendor information directly through cpuid.
  */
-static int __cpuinit x86_vendor(void)
+static int x86_vendor(void)
 {
 	u32 eax = 0x00000000;
 	u32 ebx, ecx = 0, edx;
@@ -57,7 +57,7 @@ static int __cpuinit x86_vendor(void)
 	return X86_VENDOR_UNKNOWN;
 }
 
-static int __cpuinit x86_family(void)
+static int x86_family(void)
 {
 	u32 eax = 0x00000001;
 	u32 ebx, ecx = 0, edx;
@@ -96,7 +96,7 @@ void __init load_ucode_bsp(void)
 	}
 }
 
-void __cpuinit load_ucode_ap(void)
+void load_ucode_ap(void)
 {
 	int vendor, x86;
 
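x86_vendor() above identifies the CPU before any of cpu_data is populated, so it goes straight to CPUID leaf 0, which returns the 12-byte vendor string packed into EBX, EDX, ECX. Spelled out with the well-known signature constants (a sketch; the real function compares via helper macros):

#include <asm/processor.h>

static int example_vendor(void)
{
	u32 eax = 0x00000000;
	u32 ebx, ecx = 0, edx;

	native_cpuid(&eax, &ebx, &ecx, &edx);

	if (ebx == 0x756e6547 && edx == 0x49656e69 && ecx == 0x6c65746e)
		return X86_VENDOR_INTEL;	/* "GenuineIntel" */

	if (ebx == 0x68747541 && edx == 0x69746e65 && ecx == 0x444d4163)
		return X86_VENDOR_AMD;		/* "AuthenticAMD" */

	return X86_VENDOR_UNKNOWN;
}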
diff --git a/arch/x86/kernel/microcode_intel_early.c b/arch/x86/kernel/microcode_intel_early.c
index dabef95506f3..1575deb2e636 100644
--- a/arch/x86/kernel/microcode_intel_early.c
+++ b/arch/x86/kernel/microcode_intel_early.c
@@ -34,7 +34,7 @@ struct mc_saved_data {
 	struct microcode_intel **mc_saved;
 } mc_saved_data;
 
-static enum ucode_state __cpuinit
+static enum ucode_state
 generic_load_microcode_early(struct microcode_intel **mc_saved_p,
 			     unsigned int mc_saved_count,
 			     struct ucode_cpu_info *uci)
@@ -69,7 +69,7 @@ out:
 	return state;
 }
 
-static void __cpuinit
+static void
 microcode_pointer(struct microcode_intel **mc_saved,
 		  unsigned long *mc_saved_in_initrd,
 		  unsigned long initrd_start, int mc_saved_count)
@@ -82,7 +82,7 @@ microcode_pointer(struct microcode_intel **mc_saved,
 }
 
 #ifdef CONFIG_X86_32
-static void __cpuinit
+static void
 microcode_phys(struct microcode_intel **mc_saved_tmp,
 	       struct mc_saved_data *mc_saved_data)
 {
@@ -101,7 +101,7 @@ microcode_phys(struct microcode_intel **mc_saved_tmp,
 }
 #endif
 
-static enum ucode_state __cpuinit
+static enum ucode_state
 load_microcode(struct mc_saved_data *mc_saved_data,
 	       unsigned long *mc_saved_in_initrd,
 	       unsigned long initrd_start,
@@ -375,7 +375,7 @@ do { \
 #define native_wrmsr(msr, low, high) \
 	native_write_msr(msr, low, high);
 
-static int __cpuinit collect_cpu_info_early(struct ucode_cpu_info *uci)
+static int collect_cpu_info_early(struct ucode_cpu_info *uci)
 {
 	unsigned int val[2];
 	u8 x86, x86_model;
@@ -584,7 +584,7 @@ scan_microcode(unsigned long start, unsigned long end,
 /*
  * Print ucode update info.
  */
-static void __cpuinit
+static void
 print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
 {
 	int cpu = smp_processor_id();
@@ -605,7 +605,7 @@ static int current_mc_date;
 /*
  * Print early updated ucode info after printk works. This is delayed info dump.
  */
-void __cpuinit show_ucode_info_early(void)
+void show_ucode_info_early(void)
 {
 	struct ucode_cpu_info uci;
 
@@ -621,7 +621,7 @@ void __cpuinit show_ucode_info_early(void)
  * mc_saved_data.mc_saved and delay printing microcode info in
 * show_ucode_info_early() until printk() works.
 */
-static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
+static void print_ucode(struct ucode_cpu_info *uci)
 {
 	struct microcode_intel *mc_intel;
 	int *delay_ucode_info_p;
@@ -643,12 +643,12 @@ static void __cpuinit print_ucode(struct ucode_cpu_info *uci)
 * Flush global tlb. We only do this in x86_64 where paging has been enabled
 * already and PGE should be enabled as well.
 */
-static inline void __cpuinit flush_tlb_early(void)
+static inline void flush_tlb_early(void)
 {
 	__native_flush_tlb_global_irq_disabled();
 }
 
-static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
+static inline void print_ucode(struct ucode_cpu_info *uci)
 {
 	struct microcode_intel *mc_intel;
 
@@ -660,8 +660,8 @@ static inline void __cpuinit print_ucode(struct ucode_cpu_info *uci)
 }
 #endif
 
-static int __cpuinit apply_microcode_early(struct mc_saved_data *mc_saved_data,
+static int apply_microcode_early(struct mc_saved_data *mc_saved_data,
 				 struct ucode_cpu_info *uci)
 {
 	struct microcode_intel *mc_intel;
 	unsigned int val[2];
@@ -763,7 +763,7 @@ load_ucode_intel_bsp(void)
 #endif
 }
 
-void __cpuinit load_ucode_intel_ap(void)
+void load_ucode_intel_ap(void)
 {
 	struct mc_saved_data *mc_saved_data_p;
 	struct ucode_cpu_info uci;
diff --git a/arch/x86/kernel/mmconf-fam10h_64.c b/arch/x86/kernel/mmconf-fam10h_64.c
index ac861b8348e2..f4c886d9165c 100644
--- a/arch/x86/kernel/mmconf-fam10h_64.c
+++ b/arch/x86/kernel/mmconf-fam10h_64.c
@@ -24,14 +24,14 @@ struct pci_hostbridge_probe {
 	u32 device;
 };
 
-static u64 __cpuinitdata fam10h_pci_mmconf_base;
+static u64 fam10h_pci_mmconf_base;
 
-static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
+static struct pci_hostbridge_probe pci_probes[] = {
 	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
 	{ 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
 };
 
-static int __cpuinit cmp_range(const void *x1, const void *x2)
+static int cmp_range(const void *x1, const void *x2)
 {
 	const struct range *r1 = x1;
 	const struct range *r2 = x2;
@@ -49,7 +49,7 @@ static int __cpuinit cmp_range(const void *x1, const void *x2)
 /* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */
 #define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
 #define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))
-static void __cpuinit get_fam10h_pci_mmconf_base(void)
+static void get_fam10h_pci_mmconf_base(void)
 {
 	int i;
 	unsigned bus;
@@ -166,7 +166,7 @@ out:
 	fam10h_pci_mmconf_base = base;
 }
 
-void __cpuinit fam10h_check_enable_mmcfg(void)
+void fam10h_check_enable_mmcfg(void)
 {
 	u64 val;
 	u32 address;
@@ -230,7 +230,7 @@ static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
 	{}
 };
 
-/* Called from a __cpuinit function, but only on the BSP. */
+/* Called from a non __init function, but only on the BSP. */
 void __ref check_enable_amd_mmconf_dmi(void)
 {
 	dmi_check_system(mmconf_dmi_table);
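cmp_range() above exists only to feed the kernel's generic heapsort; sort() takes the comparator plus an optional swap routine. A self-contained sketch of the same call, with a hypothetical array and count:

#include <linux/range.h>
#include <linux/sort.h>

static int example_cmp_range(const void *x1, const void *x2)
{
	const struct range *r1 = x1;
	const struct range *r2 = x2;

	/* order by start address without risking u64 subtraction overflow */
	return r1->start < r2->start ? -1 : r1->start > r2->start;
}

static void example_sort_ranges(struct range *ranges, int nr)
{
	/* NULL swap_func: sort() falls back to its generic byte swap */
	sort(ranges, nr, sizeof(struct range), example_cmp_range, NULL);
}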
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index ce130493b802..88458faea2f8 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -200,7 +200,7 @@ static const struct file_operations msr_fops = {
 	.compat_ioctl = msr_ioctl,
 };
 
-static int __cpuinit msr_device_create(int cpu)
+static int msr_device_create(int cpu)
 {
 	struct device *dev;
 
@@ -214,8 +214,8 @@ static void msr_device_destroy(int cpu)
 	device_destroy(msr_class, MKDEV(MSR_MAJOR, cpu));
 }
 
-static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
+static int msr_class_cpu_callback(struct notifier_block *nfb,
 				unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
 	int err = 0;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 81a5f5e8f142..83369e5a1d27 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -398,7 +398,7 @@ static void amd_e400_idle(void)
 		default_idle();
 }
 
-void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
+void select_idle_routine(const struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_SMP
 	if (boot_option_idle_override == IDLE_POLL && smp_num_siblings > 1)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index e68709da8251..f8ec57815c05 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -170,7 +170,7 @@ static struct resource bss_resource = {
 
 #ifdef CONFIG_X86_32
 /* cpu data as detected by the assembly code in head.S */
-struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
+struct cpuinfo_x86 new_cpu_data = {
 	.wp_works_ok = -1,
 };
 /* common cpu data for all cpus */
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index bfd348e99369..aecc98a93d1b 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -130,7 +130,7 @@ atomic_t init_deasserted;
130 * Report back to the Boot Processor during boot time or to the caller processor 130 * Report back to the Boot Processor during boot time or to the caller processor
131 * during CPU online. 131 * during CPU online.
132 */ 132 */
133static void __cpuinit smp_callin(void) 133static void smp_callin(void)
134{ 134{
135 int cpuid, phys_id; 135 int cpuid, phys_id;
136 unsigned long timeout; 136 unsigned long timeout;
@@ -237,7 +237,7 @@ static int enable_start_cpu0;
237/* 237/*
238 * Activate a secondary processor. 238 * Activate a secondary processor.
239 */ 239 */
240notrace static void __cpuinit start_secondary(void *unused) 240static void notrace start_secondary(void *unused)
241{ 241{
242 /* 242 /*
243 * Don't put *anything* before cpu_init(), SMP booting is too 243 * Don't put *anything* before cpu_init(), SMP booting is too
@@ -300,7 +300,7 @@ void __init smp_store_boot_cpu_info(void)
300 * The bootstrap kernel entry code has set these up. Save them for 300 * The bootstrap kernel entry code has set these up. Save them for
301 * a given CPU 301 * a given CPU
302 */ 302 */
303void __cpuinit smp_store_cpu_info(int id) 303void smp_store_cpu_info(int id)
304{ 304{
 	struct cpuinfo_x86 *c = &cpu_data(id);

@@ -313,7 +313,7 @@ void __cpuinit smp_store_cpu_info(int id)
 	identify_secondary_cpu(c);
 }

-static bool __cpuinit
+static bool
 topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
 {
 	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
@@ -330,7 +330,7 @@ do { \
 	cpumask_set_cpu((c2), cpu_##_m##_mask(c1)); \
 } while (0)

-static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
 	if (cpu_has_topoext) {
 		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;
@@ -348,7 +348,7 @@ static bool __cpuinit match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	return false;
 }

-static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
 	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

@@ -359,7 +359,7 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	return false;
 }

-static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
+static bool match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 {
 	if (c->phys_proc_id == o->phys_proc_id) {
 		if (cpu_has(c, X86_FEATURE_AMD_DCM))
@@ -370,7 +370,7 @@ static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
 	return false;
 }

-void __cpuinit set_cpu_sibling_map(int cpu)
+void set_cpu_sibling_map(int cpu)
 {
 	bool has_smt = smp_num_siblings > 1;
 	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
@@ -499,7 +499,7 @@ void __inquire_remote_apic(int apicid)
  * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
  * won't ... remember to clear down the APIC, etc later.
  */
-int __cpuinit
+int
 wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
 {
 	unsigned long send_status, accept_status = 0;
@@ -533,7 +533,7 @@ wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
 	return (send_status | accept_status);
 }

-static int __cpuinit
+static int
 wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 {
 	unsigned long send_status, accept_status = 0;
@@ -649,7 +649,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 }

 /* reduce the number of lines printed when booting a large cpu count system */
-static void __cpuinit announce_cpu(int cpu, int apicid)
+static void announce_cpu(int cpu, int apicid)
 {
 	static int current_node = -1;
 	int node = early_cpu_to_node(cpu);
@@ -691,7 +691,7 @@ static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
  * We'll change this code in the future to wake up hard offlined CPU0 if
  * real platform and request are available.
  */
-static int __cpuinit
+static int
 wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
 			int *cpu0_nmi_registered)
 {
@@ -731,7 +731,7 @@ wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
  * Returns zero if CPU booted OK, else error code from
  * ->wakeup_secondary_cpu.
  */
-static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
+static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 {
 	volatile u32 *trampoline_status =
 		(volatile u32 *) __va(real_mode_header->trampoline_status);
@@ -872,7 +872,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
 	return boot_error;
 }

-int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
+int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
 	int apicid = apic->cpu_present_to_apicid(cpu);
 	unsigned long flags;
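The smpboot.c hunks above cover the entire secondary-CPU bring-up path, from the INIT/SIPI wakeup helpers down to native_cpu_up(). A minimal sketch of the bug class this cleanup removes, with hypothetical names (setup_one_cpu and resume_cpus are illustrative, not from the tree): with CONFIG_HOTPLUG_CPU=n, __cpuinit text was discarded along with init memory, so any caller that ran after boot jumped into freed pages.

#include <linux/init.h>

/* lived in a throwaway section when CONFIG_HOTPLUG_CPU=n */
static int __cpuinit setup_one_cpu(int cpu)
{
	/* per-CPU state initialization */
	return 0;
}

/* runs long after init memory is freed, e.g. at S3 resume */
static int resume_cpus(void)
{
	return setup_one_cpu(0);	/* call into discarded text */
}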
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 3ff42d2f046d..addf7b58f4e8 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -320,8 +320,8 @@ static int tboot_wait_for_aps(int num_aps)
 	return !(atomic_read((atomic_t *)&tboot->num_in_wfs) == num_aps);
 }

-static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
-			unsigned long action, void *hcpu)
+static int tboot_cpu_callback(struct notifier_block *nfb, unsigned long action,
+			void *hcpu)
 {
 	switch (action) {
 	case CPU_DYING:
@@ -334,7 +334,7 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }

-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
+static struct notifier_block tboot_cpu_notifier =
 {
 	.notifier_call = tboot_cpu_callback,
 };
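The tboot change is the stock CPU-hotplug notifier pattern; with __cpuinit and __cpuinitdata gone, the callback and the notifier_block are ordinary statics. A self-contained sketch of the same pattern (the example_* names are illustrative, not from the tree):

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/notifier.h>

static int example_cpu_callback(struct notifier_block *nfb,
				unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_DYING:
		/* quiesce per-CPU state for the CPU going down */
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block example_cpu_notifier = {
	.notifier_call = example_cpu_callback,
};

static int __init example_init(void)
{
	register_hotcpu_notifier(&example_cpu_notifier);
	return 0;
}
late_initcall(example_init);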
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 098b3cfda72e..6ff49247edf8 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -824,7 +824,7 @@ static void __init check_system_tsc_reliable(void)
  * Make an educated guess if the TSC is trustworthy and synchronized
  * over all CPUs.
  */
-__cpuinit int unsynchronized_tsc(void)
+int unsynchronized_tsc(void)
 {
 	if (!cpu_has_tsc || tsc_unstable)
 		return 1;
@@ -1020,7 +1020,7 @@ void __init tsc_init(void)
  * been calibrated. This assumes that CONSTANT_TSC applies to all
  * cpus in the socket - this should be a safe assumption.
  */
-unsigned long __cpuinit calibrate_delay_is_known(void)
+unsigned long calibrate_delay_is_known(void)
 {
 	int i, cpu = smp_processor_id();

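For context, calibrate_delay_is_known() is the hook that lets secondary CPUs skip the expensive delay-loop calibration. Roughly (a simplified sketch, not the verbatim tree code), it reuses the loops_per_jiffy of an already-booted CPU in the same socket whenever the TSC is constant:

unsigned long calibrate_delay_is_known(void)
{
	int i, cpu = smp_processor_id();

	if (!cpu_has(&cpu_data(cpu), X86_FEATURE_CONSTANT_TSC))
		return 0;	/* 0 means: not known, calibrate normally */

	for_each_online_cpu(i)
		if (cpu_data(i).phys_proc_id == cpu_data(cpu).phys_proc_id)
			return cpu_data(i).loops_per_jiffy;

	return 0;
}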
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index fc25e60a5884..adfdf56a3714 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -25,24 +25,24 @@
  * Entry/exit counters that make sure that both CPUs
  * run the measurement code at once:
  */
-static __cpuinitdata atomic_t start_count;
-static __cpuinitdata atomic_t stop_count;
+static atomic_t start_count;
+static atomic_t stop_count;

 /*
  * We use a raw spinlock in this exceptional case, because
  * we want to have the fastest, inlined, non-debug version
  * of a critical section, to be able to prove TSC time-warps:
  */
-static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;
+static arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED;

-static __cpuinitdata cycles_t last_tsc;
-static __cpuinitdata cycles_t max_warp;
-static __cpuinitdata int nr_warps;
+static cycles_t last_tsc;
+static cycles_t max_warp;
+static int nr_warps;

 /*
  * TSC-warp measurement loop running on both CPUs:
  */
-static __cpuinit void check_tsc_warp(unsigned int timeout)
+static void check_tsc_warp(unsigned int timeout)
 {
 	cycles_t start, now, prev, end;
 	int i;
@@ -121,7 +121,7 @@ static inline unsigned int loop_timeout(int cpu)
  * Source CPU calls into this - it waits for the freshly booted
  * target CPU to arrive and then starts the measurement:
  */
-void __cpuinit check_tsc_sync_source(int cpu)
+void check_tsc_sync_source(int cpu)
 {
 	int cpus = 2;

@@ -187,7 +187,7 @@ void __cpuinit check_tsc_sync_source(int cpu)
 /*
  * Freshly booted CPUs call into this:
  */
-void __cpuinit check_tsc_sync_target(void)
+void check_tsc_sync_target(void)
 {
 	int cpus = 2;

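The statics demoted above (last_tsc, max_warp, nr_warps, sync_lock) are the shared state of the warp test that check_tsc_warp() runs concurrently on the source and target CPUs. A condensed sketch of that loop (simplified; the real loop also handles loop-exit bookkeeping and stall detection):

static void check_tsc_warp_sketch(unsigned int timeout_ms)
{
	cycles_t start, now, prev, end;

	start = get_cycles();
	end = start + (cycles_t)tsc_khz * timeout_ms;	/* tsc_khz = cycles/ms */
	now = start;

	while (now < end) {
		arch_spin_lock(&sync_lock);
		prev = last_tsc;
		now = get_cycles();
		last_tsc = now;
		arch_spin_unlock(&sync_lock);

		if (unlikely(prev > now)) {	/* the TSC went backwards */
			arch_spin_lock(&sync_lock);
			max_warp = max(max_warp, prev - now);
			nr_warps++;
			arch_spin_unlock(&sync_lock);
		}
	}
}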
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 9a907a67be8f..1f96f9347ed9 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -331,7 +331,7 @@ sigsegv:
  * Assume __initcall executes before all user space. Hopefully kmod
  * doesn't violate that. We'll find out if it does.
  */
-static void __cpuinit vsyscall_set_cpu(int cpu)
+static void vsyscall_set_cpu(int cpu)
 {
 	unsigned long d;
 	unsigned long node = 0;
@@ -353,13 +353,13 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
 	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
 }

-static void __cpuinit cpu_vsyscall_init(void *arg)
+static void cpu_vsyscall_init(void *arg)
 {
 	/* preemption should be already off */
 	vsyscall_set_cpu(raw_smp_processor_id());
 }

-static int __cpuinit
+static int
 cpu_vsyscall_notifier(struct notifier_block *n, unsigned long action, void *arg)
 {
 	long cpu = (long)arg;
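vsyscall_set_cpu() packs the CPU and node numbers into the limit field of the per-CPU GDT descriptor it writes, so userspace can recover both with a single unprivileged lsl. A hypothetical decoder (the 0x7b selector assumes GDT_ENTRY_PER_CPU == 15, i.e. (15 << 3) | 3, as on x86-64 of this vintage):

/* cpu in bits 0-11 of the segment limit, node in the bits above */
static void vgetcpu_sketch(unsigned int *cpu, unsigned int *node)
{
	unsigned int p;

	asm("lsl %1, %0" : "=r" (p) : "r" (0x7bU));
	if (cpu)
		*cpu = p & 0xfff;
	if (node)
		*node = p >> 12;
}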
diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
index 45a14dbbddaf..5f24c71accaa 100644
--- a/arch/x86/kernel/x86_init.c
+++ b/arch/x86/kernel/x86_init.c
@@ -25,7 +25,7 @@
 #include <asm/iommu.h>
 #include <asm/mach_traps.h>

-void __cpuinit x86_init_noop(void) { }
+void x86_init_noop(void) { }
 void __init x86_init_uint_noop(unsigned int unused) { }
 int __init iommu_init_noop(void) { return 0; }
 void iommu_shutdown_noop(void) { }
@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
 	},
 };

-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
+struct x86_cpuinit_ops x86_cpuinit = {
 	.early_percpu_clock_init = x86_init_noop,
 	.setup_percpu_clockev = setup_secondary_APIC_clock,
 };
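x86_cpuinit is written by platform code and dereferenced on every secondary-CPU bring-up, so moving it out of __cpuinitdata and into ordinary .data keeps it unconditionally resident. A hypothetical override in the style of the paravirt clock code (the my_* names are illustrative):

#include <asm/x86_init.h>

static void my_secondary_clock_init(void)
{
	/* program this CPU's clock hardware for the platform */
}

static void __init my_platform_setup(void)
{
	x86_cpuinit.early_percpu_clock_init = my_secondary_clock_init;
}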
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
index d6c28acdf99c..422fd8223470 100644
--- a/arch/x86/kernel/xsave.c
+++ b/arch/x86/kernel/xsave.c
@@ -573,7 +573,7 @@ static void __init xstate_enable_boot_cpu(void)
  * This is somewhat obfuscated due to the lack of powerful enough
  * overrides for the section checks.
  */
-void __cpuinit xsave_init(void)
+void xsave_init(void)
 {
 	static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
 	void (*this_func)(void);
@@ -594,7 +594,7 @@ static inline void __init eager_fpu_init_bp(void)
 	setup_init_fpu_buf();
 }

-void __cpuinit eager_fpu_init(void)
+void eager_fpu_init(void)
 {
 	static __refdata void (*boot_func)(void) = eager_fpu_init_bp;

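The "somewhat obfuscated" construct the xsave.c comment refers to is a self-patching function pointer: the first caller runs the __init boot-CPU path, every later (hotplug) caller takes the regular path, and __refdata suppresses the section-mismatch warning for the pointer's initial value. A stripped-down sketch with illustrative names:

#include <linux/init.h>

static void __init example_boot_once(void)
{
	/* boot-CPU-only enumeration, discarded after init */
}

static void example_enable(void)
{
	/* per-CPU enable path, safe after init memory is freed */
}

void example_feature_init(void)
{
	static __refdata void (*next_func)(void) = example_boot_once;
	void (*this_func)(void);

	this_func = next_func;
	next_func = example_enable;	/* all later calls take this path */
	this_func();
}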