author     Linus Torvalds <torvalds@linux-foundation.org>  2013-07-18 13:50:26 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-07-18 13:50:26 -0400
commit     3f334c20811d2970cbe14dbd2db3c08da0220fe8 (patch)
tree       823577e6c7cb2fddfa5330b02d1cb29281838207 /kernel
parent     c66bce9b655aac0869254dcaefd2365145477f53 (diff)
parent     0b776b062843b63cb4c9acdfc092b2581be3c2f6 (diff)
Merge branch 'cpuinit_phase2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulg/linux
Pull phase two of __cpuinit removal from Paul Gortmaker:
 "With the __cpuinit infrastructure removed earlier, this group of
  commits only removes the function/data tagging that was done with the
  various (now no-op) __cpuinit related prefixes.

  Now that the dust has settled with yesterday's v3.11-rc1, there
  hopefully shouldn't be any new users leaking back in tree, but I think
  we can leave the harmless no-op stubs there for a release as a
  courtesy to those who still have out of tree stuff and weren't paying
  attention.

  Although the commits are against the recent tag to allow for minor
  context refreshes for things like yesterday's v3.11-rc1~ slab content,
  the patches have been largely unchanged for weeks, aside from such
  trivial updates.

  For detail junkies, the largely boring and mostly irrelevant history
  of the patches can be viewed at:

    http://git.kernel.org/cgit/linux/kernel/git/paulg/cpuinit-delete.git

  If nothing else, I guess it does at least demonstrate the level of
  involvement required to shepherd such a treewide change to completion.

  This is the same repository of patches that has been applied to the
  end of the daily linux-next branches for the past several weeks"

* 'cpuinit_phase2' of git://git.kernel.org/pub/scm/linux/kernel/git/paulg/linux: (28 commits)
  block: delete __cpuinit usage from all block files
  drivers: delete __cpuinit usage from all remaining drivers files
  kernel: delete __cpuinit usage from all core kernel files
  rcu: delete __cpuinit usage from all rcu files
  net: delete __cpuinit usage from all net files
  acpi: delete __cpuinit usage from all acpi files
  hwmon: delete __cpuinit usage from all hwmon files
  cpufreq: delete __cpuinit usage from all cpufreq files
  clocksource+irqchip: delete __cpuinit usage from all related files
  x86: delete __cpuinit usage from all x86 files
  score: delete __cpuinit usage from all score files
  xtensa: delete __cpuinit usage from all xtensa files
  openrisc: delete __cpuinit usage from all openrisc files
  m32r: delete __cpuinit usage from all m32r files
  hexagon: delete __cpuinit usage from all hexagon files
  frv: delete __cpuinit usage from all frv files
  cris: delete __cpuinit usage from all cris files
  metag: delete __cpuinit usage from all metag files
  tile: delete __cpuinit usage from all tile files
  sh: delete __cpuinit usage from all sh files
  ...
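As a rough sketch of the "harmless no-op stubs" mentioned above (the stub definitions are not part of this diff, and the exact set kept in include/linux/init.h may differ), the removed markers now expand to nothing, so deleting an annotation leaves the generated code unchanged:

    /* assumed shape of the phase-one no-op stubs, for illustration only */
    #define __cpuinit
    #define __cpuinitdata

    /* with empty definitions, dropping the marker is purely cosmetic, e.g.: */
    int __cpuinit cpu_up(unsigned int cpu);   /* before this series */
    int cpu_up(unsigned int cpu);             /* after this series  */

One way (not part of this series) to check for stragglers under kernel/ is simply: git grep -n __cpuinit -- kernel/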
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c              6
-rw-r--r--  kernel/events/core.c      4
-rw-r--r--  kernel/fork.c             2
-rw-r--r--  kernel/hrtimer.c          6
-rw-r--r--  kernel/printk.c           2
-rw-r--r--  kernel/profile.c          2
-rw-r--r--  kernel/rcutorture.c       6
-rw-r--r--  kernel/rcutree.c          6
-rw-r--r--  kernel/rcutree.h          4
-rw-r--r--  kernel/rcutree_plugin.h   6
-rw-r--r--  kernel/relay.c            2
-rw-r--r--  kernel/sched/core.c      12
-rw-r--r--  kernel/sched/fair.c       2
-rw-r--r--  kernel/smp.c              2
-rw-r--r--  kernel/smpboot.c          2
-rw-r--r--  kernel/softirq.c          8
-rw-r--r--  kernel/time/tick-sched.c  2
-rw-r--r--  kernel/timer.c           10
-rw-r--r--  kernel/workqueue.c        4
19 files changed, 44 insertions, 44 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 198a38883e64..b2b227b82123 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -366,7 +366,7 @@ EXPORT_SYMBOL(cpu_down);
 #endif /*CONFIG_HOTPLUG_CPU*/
 
 /* Requires cpu_add_remove_lock to be held */
-static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
+static int _cpu_up(unsigned int cpu, int tasks_frozen)
 {
 	int ret, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
@@ -419,7 +419,7 @@ out:
 	return ret;
 }
 
-int __cpuinit cpu_up(unsigned int cpu)
+int cpu_up(unsigned int cpu)
 {
 	int err = 0;
 
@@ -618,7 +618,7 @@ core_initcall(cpu_hotplug_pm_sync_init);
  * It must be called by the arch code on the new cpu, before the new cpu
  * enables interrupts and before the "boot" cpu returns from __cpu_up().
  */
-void __cpuinit notify_cpu_starting(unsigned int cpu)
+void notify_cpu_starting(unsigned int cpu)
 {
 	unsigned long val = CPU_STARTING;
 
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eba8fb5834ae..f3e9dce39bc9 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7630,7 +7630,7 @@ static void __init perf_event_init_all_cpus(void)
 	}
 }
 
-static void __cpuinit perf_event_init_cpu(int cpu)
+static void perf_event_init_cpu(int cpu)
 {
 	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
@@ -7719,7 +7719,7 @@ static struct notifier_block perf_reboot_notifier = {
 	.priority = INT_MIN,
 };
 
-static int __cpuinit
+static int
 perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (long)hcpu;
diff --git a/kernel/fork.c b/kernel/fork.c
index 66635c80a813..403d2bb8a968 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1546,7 +1546,7 @@ static inline void init_idle_pids(struct pid_link *links)
 	}
 }
 
-struct task_struct * __cpuinit fork_idle(int cpu)
+struct task_struct *fork_idle(int cpu)
 {
 	struct task_struct *task;
 	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index f0f4fe29cd21..383319bae3f7 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1659,7 +1659,7 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp,
 /*
  * Functions related to boot-time initialization:
  */
-static void __cpuinit init_hrtimers_cpu(int cpu)
+static void init_hrtimers_cpu(int cpu)
 {
 	struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu);
 	int i;
@@ -1740,7 +1740,7 @@ static void migrate_hrtimers(int scpu)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
+static int hrtimer_cpu_notify(struct notifier_block *self,
 					unsigned long action, void *hcpu)
 {
 	int scpu = (long)hcpu;
@@ -1773,7 +1773,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata hrtimers_nb = {
+static struct notifier_block hrtimers_nb = {
 	.notifier_call = hrtimer_cpu_notify,
 };
 
diff --git a/kernel/printk.c b/kernel/printk.c
index d37d45c90ae6..69b0890ed7e5 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -1921,7 +1921,7 @@ void resume_console(void)
  * called when a new CPU comes online (or fails to come up), and ensures
  * that any such output gets printed.
  */
-static int __cpuinit console_cpu_notify(struct notifier_block *self,
+static int console_cpu_notify(struct notifier_block *self,
 	unsigned long action, void *hcpu)
 {
 	switch (action) {
diff --git a/kernel/profile.c b/kernel/profile.c
index 0bf400737660..6631e1ef55ab 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -331,7 +331,7 @@ out:
 	put_cpu();
 }
 
-static int __cpuinit profile_cpu_callback(struct notifier_block *info,
+static int profile_cpu_callback(struct notifier_block *info,
 					unsigned long action, void *__cpu)
 {
 	int node, cpu = (unsigned long)__cpu;
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index b1fa5510388d..f4871e52c546 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -1476,7 +1476,7 @@ rcu_torture_shutdown(void *arg)
  * Execute random CPU-hotplug operations at the interval specified
  * by the onoff_interval.
  */
-static int __cpuinit
+static int
 rcu_torture_onoff(void *arg)
 {
 	int cpu;
@@ -1558,7 +1558,7 @@ rcu_torture_onoff(void *arg)
 	return 0;
 }
 
-static int __cpuinit
+static int
 rcu_torture_onoff_init(void)
 {
 	int ret;
@@ -1601,7 +1601,7 @@ static void rcu_torture_onoff_cleanup(void)
  * CPU-stall kthread. It waits as specified by stall_cpu_holdoff, then
  * induces a CPU stall for the time specified by stall_cpu.
  */
-static int __cpuinit rcu_torture_stall(void *args)
+static int rcu_torture_stall(void *args)
 {
 	unsigned long stop_at;
 
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index e08abb9461ac..068de3a93606 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -2910,7 +2910,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
  * can accept some slop in the rsp->completed access due to the fact
  * that this CPU cannot possibly have any RCU callbacks in flight yet.
  */
-static void __cpuinit
+static void
 rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 {
 	unsigned long flags;
@@ -2962,7 +2962,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	mutex_unlock(&rsp->onoff_mutex);
 }
 
-static void __cpuinit rcu_prepare_cpu(int cpu)
+static void rcu_prepare_cpu(int cpu)
 {
 	struct rcu_state *rsp;
 
@@ -2974,7 +2974,7 @@ static void __cpuinit rcu_prepare_cpu(int cpu)
 /*
  * Handle CPU online/offline notification events.
  */
-static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
+static int rcu_cpu_notify(struct notifier_block *self,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 4a39d364493c..b3832581043c 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -521,10 +521,10 @@ static void invoke_rcu_callbacks_kthread(void);
 static bool rcu_is_callbacks_kthread(void);
 #ifdef CONFIG_RCU_BOOST
 static void rcu_preempt_do_callbacks(void);
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
-static void __cpuinit rcu_prepare_kthreads(int cpu);
+static void rcu_prepare_kthreads(int cpu);
 static void rcu_cleanup_after_idle(int cpu);
 static void rcu_prepare_for_idle(int cpu);
 static void rcu_idle_count_callbacks_posted(void);
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 63098a59216e..769e12e3151b 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1352,7 +1352,7 @@ static void rcu_preempt_boost_start_gp(struct rcu_node *rnp)
  * already exist. We only create this kthread for preemptible RCU.
  * Returns zero if all is well, a negated errno otherwise.
  */
-static int __cpuinit rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
+static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp)
 {
 	int rnp_index = rnp - &rsp->node[0];
@@ -1507,7 +1507,7 @@ static int __init rcu_spawn_kthreads(void)
 }
 early_initcall(rcu_spawn_kthreads);
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
 {
 	struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
 	struct rcu_node *rnp = rdp->mynode;
@@ -1549,7 +1549,7 @@ static int __init rcu_scheduler_really_started(void)
 }
 early_initcall(rcu_scheduler_really_started);
 
-static void __cpuinit rcu_prepare_kthreads(int cpu)
+static void rcu_prepare_kthreads(int cpu)
 {
 }
 
diff --git a/kernel/relay.c b/kernel/relay.c
index b91488ba2e5a..5001c9887db1 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -516,7 +516,7 @@ static void setup_callbacks(struct rchan *chan,
  *
  * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD)
  */
-static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
+static int relay_hotcpu_callback(struct notifier_block *nb,
 				unsigned long action,
 				void *hcpu)
 {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0d8eb4525e76..b7c32cb7bfeb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4133,7 +4133,7 @@ void show_state_filter(unsigned long state_filter)
 	debug_show_all_locks();
 }
 
-void __cpuinit init_idle_bootup_task(struct task_struct *idle)
+void init_idle_bootup_task(struct task_struct *idle)
 {
 	idle->sched_class = &idle_sched_class;
 }
@@ -4146,7 +4146,7 @@ void __cpuinit init_idle_bootup_task(struct task_struct *idle)
  * NOTE: this function does not set the idle thread's NEED_RESCHED
  * flag, to make booting more robust.
  */
-void __cpuinit init_idle(struct task_struct *idle, int cpu)
+void init_idle(struct task_struct *idle, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
 	unsigned long flags;
@@ -4630,7 +4630,7 @@ static void set_rq_offline(struct rq *rq)
  * migration_call - callback that gets triggered when a CPU is added.
 * Here we can start up the necessary migration thread for the new CPU.
 */
-static int __cpuinit
+static int
 migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	int cpu = (long)hcpu;
@@ -4684,12 +4684,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
  * happens before everything else. This has to be lower priority than
 * the notifier in the perf_event subsystem, though.
 */
-static struct notifier_block __cpuinitdata migration_notifier = {
+static struct notifier_block migration_notifier = {
 	.notifier_call = migration_call,
 	.priority = CPU_PRI_MIGRATION,
 };
 
-static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
+static int sched_cpu_active(struct notifier_block *nfb,
 				      unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
@@ -4702,7 +4702,7 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb,
 	}
 }
 
-static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb,
+static int sched_cpu_inactive(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f77f9c527449..bb456f44b7b1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5506,7 +5506,7 @@ void nohz_balance_enter_idle(int cpu)
 	set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
 }
 
-static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb,
+static int sched_ilb_notifier(struct notifier_block *nfb,
 					unsigned long action, void *hcpu)
 {
 	switch (action & ~CPU_TASKS_FROZEN) {
diff --git a/kernel/smp.c b/kernel/smp.c
index 4dba0f7b72ad..fe9f773d7114 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
+static struct notifier_block hotplug_cfd_notifier = {
 	.notifier_call = hotplug_cfd,
 };
 
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index 02fc5c933673..eb89e1807408 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -24,7 +24,7 @@
  */
 static DEFINE_PER_CPU(struct task_struct *, idle_threads);
 
-struct task_struct * __cpuinit idle_thread_get(unsigned int cpu)
+struct task_struct *idle_thread_get(unsigned int cpu)
 {
 	struct task_struct *tsk = per_cpu(idle_threads, cpu);
 
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ca25e6e704a2..be3d3514c325 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -699,7 +699,7 @@ void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
 }
 EXPORT_SYMBOL(send_remote_softirq);
 
-static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
+static int remote_softirq_cpu_notify(struct notifier_block *self,
 					       unsigned long action, void *hcpu)
 {
 	/*
@@ -728,7 +728,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
+static struct notifier_block remote_softirq_cpu_notifier = {
 	.notifier_call = remote_softirq_cpu_notify,
 };
 
@@ -830,7 +830,7 @@ static void takeover_tasklets(unsigned int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __cpuinit cpu_callback(struct notifier_block *nfb,
+static int cpu_callback(struct notifier_block *nfb,
 				  unsigned long action,
 				  void *hcpu)
 {
@@ -845,7 +845,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cpu_nfb = {
+static struct notifier_block cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 69601726a745..e80183f4a6c4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -298,7 +298,7 @@ static int __init tick_nohz_full_setup(char *str)
 }
 __setup("nohz_full=", tick_nohz_full_setup);
 
-static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb,
+static int tick_nohz_cpu_down_callback(struct notifier_block *nfb,
 						 unsigned long action,
 						 void *hcpu)
 {
diff --git a/kernel/timer.c b/kernel/timer.c
index 15bc1b41021d..4296d13db3d1 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1505,11 +1505,11 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 }
 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
-static int __cpuinit init_timers_cpu(int cpu)
+static int init_timers_cpu(int cpu)
 {
 	int j;
 	struct tvec_base *base;
-	static char __cpuinitdata tvec_base_done[NR_CPUS];
+	static char tvec_base_done[NR_CPUS];
 
 	if (!tvec_base_done[cpu]) {
 		static char boot_done;
@@ -1577,7 +1577,7 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea
 	}
 }
 
-static void __cpuinit migrate_timers(int cpu)
+static void migrate_timers(int cpu)
 {
 	struct tvec_base *old_base;
 	struct tvec_base *new_base;
@@ -1610,7 +1610,7 @@ static void __cpuinit migrate_timers(int cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static int __cpuinit timer_cpu_notify(struct notifier_block *self,
+static int timer_cpu_notify(struct notifier_block *self,
 				unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata timers_nb = {
+static struct notifier_block timers_nb = {
 	.notifier_call = timer_cpu_notify,
 };
 
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f02c4a4a0c3c..0b72e816b8d0 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -4644,7 +4644,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu)
  * Workqueues should be brought up before normal priority CPU notifiers.
  * This will be registered high priority CPU notifier.
  */
-static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+static int workqueue_cpu_up_callback(struct notifier_block *nfb,
 					       unsigned long action,
 					       void *hcpu)
 {
@@ -4697,7 +4697,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
  * Workqueues should be brought down after normal priority CPU notifiers.
  * This will be registered as low priority CPU notifier.
  */
-static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+static int workqueue_cpu_down_callback(struct notifier_block *nfb,
 						 unsigned long action,
 						 void *hcpu)
 {