diff options
author | Paul Gortmaker <paul.gortmaker@windriver.com> | 2013-06-19 14:53:51 -0400 |
---|---|---|
committer | Paul Gortmaker <paul.gortmaker@windriver.com> | 2013-07-14 19:36:59 -0400 |
commit | 0db0628d90125193280eabb501c94feaf48fa9ab (patch) | |
tree | 0e0ef0c4eac101d25a3bd125c4a9200ac4d294c0 /kernel | |
parent | 49fb4c6290c70c418a5c25eee996d6b55ea132d6 (diff) |
kernel: delete __cpuinit usage from all core kernel files
The __cpuinit type of throwaway sections might have made sense
some time ago when RAM was more constrained, but now the savings
do not offset the cost and complications. The fix in
commit 5e427ec2d0 ("x86: Fix bit corruption at CPU resume time")
is a good example of the nasty type of bugs that can be created
with improper use of the various __init prefixes.
After a discussion on LKML[1] it was decided that __cpuinit should go
the way of __devinit and be phased out. Once all the users are gone,
we can then finally remove the macros themselves from linux/init.h.
This removes all the uses of the __cpuinit macros from C files in
the core kernel directories (kernel, init, lib, mm, and include)
that don't really have a specific maintainer.
[1] https://lkml.org/lkml/2013/5/20/589
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
Diffstat (limited to 'kernel')
-rw-r--r-- | kernel/cpu.c | 6 | ||||
-rw-r--r-- | kernel/events/core.c | 4 | ||||
-rw-r--r-- | kernel/fork.c | 2 | ||||
-rw-r--r-- | kernel/hrtimer.c | 6 | ||||
-rw-r--r-- | kernel/printk.c | 2 | ||||
-rw-r--r-- | kernel/profile.c | 2 | ||||
-rw-r--r-- | kernel/relay.c | 2 | ||||
-rw-r--r-- | kernel/sched/core.c | 12 | ||||
-rw-r--r-- | kernel/sched/fair.c | 2 | ||||
-rw-r--r-- | kernel/smp.c | 2 | ||||
-rw-r--r-- | kernel/smpboot.c | 2 | ||||
-rw-r--r-- | kernel/softirq.c | 8 | ||||
-rw-r--r-- | kernel/time/tick-sched.c | 2 | ||||
-rw-r--r-- | kernel/timer.c | 10 | ||||
-rw-r--r-- | kernel/workqueue.c | 4 |
15 files changed, 33 insertions, 33 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c index 198a38883e64..b2b227b82123 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c | |||
@@ -366,7 +366,7 @@ EXPORT_SYMBOL(cpu_down); | |||
366 | #endif /*CONFIG_HOTPLUG_CPU*/ | 366 | #endif /*CONFIG_HOTPLUG_CPU*/ |
367 | 367 | ||
368 | /* Requires cpu_add_remove_lock to be held */ | 368 | /* Requires cpu_add_remove_lock to be held */ |
369 | static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen) | 369 | static int _cpu_up(unsigned int cpu, int tasks_frozen) |
370 | { | 370 | { |
371 | int ret, nr_calls = 0; | 371 | int ret, nr_calls = 0; |
372 | void *hcpu = (void *)(long)cpu; | 372 | void *hcpu = (void *)(long)cpu; |
@@ -419,7 +419,7 @@ out: | |||
419 | return ret; | 419 | return ret; |
420 | } | 420 | } |
421 | 421 | ||
422 | int __cpuinit cpu_up(unsigned int cpu) | 422 | int cpu_up(unsigned int cpu) |
423 | { | 423 | { |
424 | int err = 0; | 424 | int err = 0; |
425 | 425 | ||
@@ -618,7 +618,7 @@ core_initcall(cpu_hotplug_pm_sync_init); | |||
618 | * It must be called by the arch code on the new cpu, before the new cpu | 618 | * It must be called by the arch code on the new cpu, before the new cpu |
619 | * enables interrupts and before the "boot" cpu returns from __cpu_up(). | 619 | * enables interrupts and before the "boot" cpu returns from __cpu_up(). |
620 | */ | 620 | */ |
621 | void __cpuinit notify_cpu_starting(unsigned int cpu) | 621 | void notify_cpu_starting(unsigned int cpu) |
622 | { | 622 | { |
623 | unsigned long val = CPU_STARTING; | 623 | unsigned long val = CPU_STARTING; |
624 | 624 | ||
diff --git a/kernel/events/core.c b/kernel/events/core.c index eba8fb5834ae..f3e9dce39bc9 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c | |||
@@ -7630,7 +7630,7 @@ static void __init perf_event_init_all_cpus(void) | |||
7630 | } | 7630 | } |
7631 | } | 7631 | } |
7632 | 7632 | ||
7633 | static void __cpuinit perf_event_init_cpu(int cpu) | 7633 | static void perf_event_init_cpu(int cpu) |
7634 | { | 7634 | { |
7635 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); | 7635 | struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu); |
7636 | 7636 | ||
@@ -7719,7 +7719,7 @@ static struct notifier_block perf_reboot_notifier = { | |||
7719 | .priority = INT_MIN, | 7719 | .priority = INT_MIN, |
7720 | }; | 7720 | }; |
7721 | 7721 | ||
7722 | static int __cpuinit | 7722 | static int |
7723 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) | 7723 | perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) |
7724 | { | 7724 | { |
7725 | unsigned int cpu = (long)hcpu; | 7725 | unsigned int cpu = (long)hcpu; |
diff --git a/kernel/fork.c b/kernel/fork.c index 66635c80a813..403d2bb8a968 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -1546,7 +1546,7 @@ static inline void init_idle_pids(struct pid_link *links) | |||
1546 | } | 1546 | } |
1547 | } | 1547 | } |
1548 | 1548 | ||
1549 | struct task_struct * __cpuinit fork_idle(int cpu) | 1549 | struct task_struct *fork_idle(int cpu) |
1550 | { | 1550 | { |
1551 | struct task_struct *task; | 1551 | struct task_struct *task; |
1552 | task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0); | 1552 | task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0); |
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index f0f4fe29cd21..383319bae3f7 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c | |||
@@ -1659,7 +1659,7 @@ SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, | |||
1659 | /* | 1659 | /* |
1660 | * Functions related to boot-time initialization: | 1660 | * Functions related to boot-time initialization: |
1661 | */ | 1661 | */ |
1662 | static void __cpuinit init_hrtimers_cpu(int cpu) | 1662 | static void init_hrtimers_cpu(int cpu) |
1663 | { | 1663 | { |
1664 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); | 1664 | struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); |
1665 | int i; | 1665 | int i; |
@@ -1740,7 +1740,7 @@ static void migrate_hrtimers(int scpu) | |||
1740 | 1740 | ||
1741 | #endif /* CONFIG_HOTPLUG_CPU */ | 1741 | #endif /* CONFIG_HOTPLUG_CPU */ |
1742 | 1742 | ||
1743 | static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, | 1743 | static int hrtimer_cpu_notify(struct notifier_block *self, |
1744 | unsigned long action, void *hcpu) | 1744 | unsigned long action, void *hcpu) |
1745 | { | 1745 | { |
1746 | int scpu = (long)hcpu; | 1746 | int scpu = (long)hcpu; |
@@ -1773,7 +1773,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self, | |||
1773 | return NOTIFY_OK; | 1773 | return NOTIFY_OK; |
1774 | } | 1774 | } |
1775 | 1775 | ||
1776 | static struct notifier_block __cpuinitdata hrtimers_nb = { | 1776 | static struct notifier_block hrtimers_nb = { |
1777 | .notifier_call = hrtimer_cpu_notify, | 1777 | .notifier_call = hrtimer_cpu_notify, |
1778 | }; | 1778 | }; |
1779 | 1779 | ||
diff --git a/kernel/printk.c b/kernel/printk.c index d37d45c90ae6..69b0890ed7e5 100644 --- a/kernel/printk.c +++ b/kernel/printk.c | |||
@@ -1921,7 +1921,7 @@ void resume_console(void) | |||
1921 | * called when a new CPU comes online (or fails to come up), and ensures | 1921 | * called when a new CPU comes online (or fails to come up), and ensures |
1922 | * that any such output gets printed. | 1922 | * that any such output gets printed. |
1923 | */ | 1923 | */ |
1924 | static int __cpuinit console_cpu_notify(struct notifier_block *self, | 1924 | static int console_cpu_notify(struct notifier_block *self, |
1925 | unsigned long action, void *hcpu) | 1925 | unsigned long action, void *hcpu) |
1926 | { | 1926 | { |
1927 | switch (action) { | 1927 | switch (action) { |
diff --git a/kernel/profile.c b/kernel/profile.c index 0bf400737660..6631e1ef55ab 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -331,7 +331,7 @@ out: | |||
331 | put_cpu(); | 331 | put_cpu(); |
332 | } | 332 | } |
333 | 333 | ||
334 | static int __cpuinit profile_cpu_callback(struct notifier_block *info, | 334 | static int profile_cpu_callback(struct notifier_block *info, |
335 | unsigned long action, void *__cpu) | 335 | unsigned long action, void *__cpu) |
336 | { | 336 | { |
337 | int node, cpu = (unsigned long)__cpu; | 337 | int node, cpu = (unsigned long)__cpu; |
diff --git a/kernel/relay.c b/kernel/relay.c index b91488ba2e5a..5001c9887db1 100644 --- a/kernel/relay.c +++ b/kernel/relay.c | |||
@@ -516,7 +516,7 @@ static void setup_callbacks(struct rchan *chan, | |||
516 | * | 516 | * |
517 | * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD) | 517 | * Returns the success/failure of the operation. (%NOTIFY_OK, %NOTIFY_BAD) |
518 | */ | 518 | */ |
519 | static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb, | 519 | static int relay_hotcpu_callback(struct notifier_block *nb, |
520 | unsigned long action, | 520 | unsigned long action, |
521 | void *hcpu) | 521 | void *hcpu) |
522 | { | 522 | { |
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 0d8eb4525e76..b7c32cb7bfeb 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -4133,7 +4133,7 @@ void show_state_filter(unsigned long state_filter) | |||
4133 | debug_show_all_locks(); | 4133 | debug_show_all_locks(); |
4134 | } | 4134 | } |
4135 | 4135 | ||
4136 | void __cpuinit init_idle_bootup_task(struct task_struct *idle) | 4136 | void init_idle_bootup_task(struct task_struct *idle) |
4137 | { | 4137 | { |
4138 | idle->sched_class = &idle_sched_class; | 4138 | idle->sched_class = &idle_sched_class; |
4139 | } | 4139 | } |
@@ -4146,7 +4146,7 @@ void __cpuinit init_idle_bootup_task(struct task_struct *idle) | |||
4146 | * NOTE: this function does not set the idle thread's NEED_RESCHED | 4146 | * NOTE: this function does not set the idle thread's NEED_RESCHED |
4147 | * flag, to make booting more robust. | 4147 | * flag, to make booting more robust. |
4148 | */ | 4148 | */ |
4149 | void __cpuinit init_idle(struct task_struct *idle, int cpu) | 4149 | void init_idle(struct task_struct *idle, int cpu) |
4150 | { | 4150 | { |
4151 | struct rq *rq = cpu_rq(cpu); | 4151 | struct rq *rq = cpu_rq(cpu); |
4152 | unsigned long flags; | 4152 | unsigned long flags; |
@@ -4630,7 +4630,7 @@ static void set_rq_offline(struct rq *rq) | |||
4630 | * migration_call - callback that gets triggered when a CPU is added. | 4630 | * migration_call - callback that gets triggered when a CPU is added. |
4631 | * Here we can start up the necessary migration thread for the new CPU. | 4631 | * Here we can start up the necessary migration thread for the new CPU. |
4632 | */ | 4632 | */ |
4633 | static int __cpuinit | 4633 | static int |
4634 | migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | 4634 | migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) |
4635 | { | 4635 | { |
4636 | int cpu = (long)hcpu; | 4636 | int cpu = (long)hcpu; |
@@ -4684,12 +4684,12 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
4684 | * happens before everything else. This has to be lower priority than | 4684 | * happens before everything else. This has to be lower priority than |
4685 | * the notifier in the perf_event subsystem, though. | 4685 | * the notifier in the perf_event subsystem, though. |
4686 | */ | 4686 | */ |
4687 | static struct notifier_block __cpuinitdata migration_notifier = { | 4687 | static struct notifier_block migration_notifier = { |
4688 | .notifier_call = migration_call, | 4688 | .notifier_call = migration_call, |
4689 | .priority = CPU_PRI_MIGRATION, | 4689 | .priority = CPU_PRI_MIGRATION, |
4690 | }; | 4690 | }; |
4691 | 4691 | ||
4692 | static int __cpuinit sched_cpu_active(struct notifier_block *nfb, | 4692 | static int sched_cpu_active(struct notifier_block *nfb, |
4693 | unsigned long action, void *hcpu) | 4693 | unsigned long action, void *hcpu) |
4694 | { | 4694 | { |
4695 | switch (action & ~CPU_TASKS_FROZEN) { | 4695 | switch (action & ~CPU_TASKS_FROZEN) { |
@@ -4702,7 +4702,7 @@ static int __cpuinit sched_cpu_active(struct notifier_block *nfb, | |||
4702 | } | 4702 | } |
4703 | } | 4703 | } |
4704 | 4704 | ||
4705 | static int __cpuinit sched_cpu_inactive(struct notifier_block *nfb, | 4705 | static int sched_cpu_inactive(struct notifier_block *nfb, |
4706 | unsigned long action, void *hcpu) | 4706 | unsigned long action, void *hcpu) |
4707 | { | 4707 | { |
4708 | switch (action & ~CPU_TASKS_FROZEN) { | 4708 | switch (action & ~CPU_TASKS_FROZEN) { |
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index f77f9c527449..bb456f44b7b1 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c | |||
@@ -5506,7 +5506,7 @@ void nohz_balance_enter_idle(int cpu) | |||
5506 | set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); | 5506 | set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); |
5507 | } | 5507 | } |
5508 | 5508 | ||
5509 | static int __cpuinit sched_ilb_notifier(struct notifier_block *nfb, | 5509 | static int sched_ilb_notifier(struct notifier_block *nfb, |
5510 | unsigned long action, void *hcpu) | 5510 | unsigned long action, void *hcpu) |
5511 | { | 5511 | { |
5512 | switch (action & ~CPU_TASKS_FROZEN) { | 5512 | switch (action & ~CPU_TASKS_FROZEN) { |
diff --git a/kernel/smp.c b/kernel/smp.c index 4dba0f7b72ad..fe9f773d7114 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
73 | return NOTIFY_OK; | 73 | return NOTIFY_OK; |
74 | } | 74 | } |
75 | 75 | ||
76 | static struct notifier_block __cpuinitdata hotplug_cfd_notifier = { | 76 | static struct notifier_block hotplug_cfd_notifier = { |
77 | .notifier_call = hotplug_cfd, | 77 | .notifier_call = hotplug_cfd, |
78 | }; | 78 | }; |
79 | 79 | ||
diff --git a/kernel/smpboot.c b/kernel/smpboot.c index 02fc5c933673..eb89e1807408 100644 --- a/kernel/smpboot.c +++ b/kernel/smpboot.c | |||
@@ -24,7 +24,7 @@ | |||
24 | */ | 24 | */ |
25 | static DEFINE_PER_CPU(struct task_struct *, idle_threads); | 25 | static DEFINE_PER_CPU(struct task_struct *, idle_threads); |
26 | 26 | ||
27 | struct task_struct * __cpuinit idle_thread_get(unsigned int cpu) | 27 | struct task_struct *idle_thread_get(unsigned int cpu) |
28 | { | 28 | { |
29 | struct task_struct *tsk = per_cpu(idle_threads, cpu); | 29 | struct task_struct *tsk = per_cpu(idle_threads, cpu); |
30 | 30 | ||
diff --git a/kernel/softirq.c b/kernel/softirq.c index ca25e6e704a2..be3d3514c325 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -699,7 +699,7 @@ void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq) | |||
699 | } | 699 | } |
700 | EXPORT_SYMBOL(send_remote_softirq); | 700 | EXPORT_SYMBOL(send_remote_softirq); |
701 | 701 | ||
702 | static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self, | 702 | static int remote_softirq_cpu_notify(struct notifier_block *self, |
703 | unsigned long action, void *hcpu) | 703 | unsigned long action, void *hcpu) |
704 | { | 704 | { |
705 | /* | 705 | /* |
@@ -728,7 +728,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self, | |||
728 | return NOTIFY_OK; | 728 | return NOTIFY_OK; |
729 | } | 729 | } |
730 | 730 | ||
731 | static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = { | 731 | static struct notifier_block remote_softirq_cpu_notifier = { |
732 | .notifier_call = remote_softirq_cpu_notify, | 732 | .notifier_call = remote_softirq_cpu_notify, |
733 | }; | 733 | }; |
734 | 734 | ||
@@ -830,7 +830,7 @@ static void takeover_tasklets(unsigned int cpu) | |||
830 | } | 830 | } |
831 | #endif /* CONFIG_HOTPLUG_CPU */ | 831 | #endif /* CONFIG_HOTPLUG_CPU */ |
832 | 832 | ||
833 | static int __cpuinit cpu_callback(struct notifier_block *nfb, | 833 | static int cpu_callback(struct notifier_block *nfb, |
834 | unsigned long action, | 834 | unsigned long action, |
835 | void *hcpu) | 835 | void *hcpu) |
836 | { | 836 | { |
@@ -845,7 +845,7 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, | |||
845 | return NOTIFY_OK; | 845 | return NOTIFY_OK; |
846 | } | 846 | } |
847 | 847 | ||
848 | static struct notifier_block __cpuinitdata cpu_nfb = { | 848 | static struct notifier_block cpu_nfb = { |
849 | .notifier_call = cpu_callback | 849 | .notifier_call = cpu_callback |
850 | }; | 850 | }; |
851 | 851 | ||
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 69601726a745..e80183f4a6c4 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c | |||
@@ -298,7 +298,7 @@ static int __init tick_nohz_full_setup(char *str) | |||
298 | } | 298 | } |
299 | __setup("nohz_full=", tick_nohz_full_setup); | 299 | __setup("nohz_full=", tick_nohz_full_setup); |
300 | 300 | ||
301 | static int __cpuinit tick_nohz_cpu_down_callback(struct notifier_block *nfb, | 301 | static int tick_nohz_cpu_down_callback(struct notifier_block *nfb, |
302 | unsigned long action, | 302 | unsigned long action, |
303 | void *hcpu) | 303 | void *hcpu) |
304 | { | 304 | { |
diff --git a/kernel/timer.c b/kernel/timer.c index 15bc1b41021d..4296d13db3d1 100644 --- a/kernel/timer.c +++ b/kernel/timer.c | |||
@@ -1505,11 +1505,11 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout) | |||
1505 | } | 1505 | } |
1506 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); | 1506 | EXPORT_SYMBOL(schedule_timeout_uninterruptible); |
1507 | 1507 | ||
1508 | static int __cpuinit init_timers_cpu(int cpu) | 1508 | static int init_timers_cpu(int cpu) |
1509 | { | 1509 | { |
1510 | int j; | 1510 | int j; |
1511 | struct tvec_base *base; | 1511 | struct tvec_base *base; |
1512 | static char __cpuinitdata tvec_base_done[NR_CPUS]; | 1512 | static char tvec_base_done[NR_CPUS]; |
1513 | 1513 | ||
1514 | if (!tvec_base_done[cpu]) { | 1514 | if (!tvec_base_done[cpu]) { |
1515 | static char boot_done; | 1515 | static char boot_done; |
@@ -1577,7 +1577,7 @@ static void migrate_timer_list(struct tvec_base *new_base, struct list_head *hea | |||
1577 | } | 1577 | } |
1578 | } | 1578 | } |
1579 | 1579 | ||
1580 | static void __cpuinit migrate_timers(int cpu) | 1580 | static void migrate_timers(int cpu) |
1581 | { | 1581 | { |
1582 | struct tvec_base *old_base; | 1582 | struct tvec_base *old_base; |
1583 | struct tvec_base *new_base; | 1583 | struct tvec_base *new_base; |
@@ -1610,7 +1610,7 @@ static void __cpuinit migrate_timers(int cpu) | |||
1610 | } | 1610 | } |
1611 | #endif /* CONFIG_HOTPLUG_CPU */ | 1611 | #endif /* CONFIG_HOTPLUG_CPU */ |
1612 | 1612 | ||
1613 | static int __cpuinit timer_cpu_notify(struct notifier_block *self, | 1613 | static int timer_cpu_notify(struct notifier_block *self, |
1614 | unsigned long action, void *hcpu) | 1614 | unsigned long action, void *hcpu) |
1615 | { | 1615 | { |
1616 | long cpu = (long)hcpu; | 1616 | long cpu = (long)hcpu; |
@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self, | |||
1635 | return NOTIFY_OK; | 1635 | return NOTIFY_OK; |
1636 | } | 1636 | } |
1637 | 1637 | ||
1638 | static struct notifier_block __cpuinitdata timers_nb = { | 1638 | static struct notifier_block timers_nb = { |
1639 | .notifier_call = timer_cpu_notify, | 1639 | .notifier_call = timer_cpu_notify, |
1640 | }; | 1640 | }; |
1641 | 1641 | ||
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index f02c4a4a0c3c..0b72e816b8d0 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -4644,7 +4644,7 @@ static void restore_unbound_workers_cpumask(struct worker_pool *pool, int cpu) | |||
4644 | * Workqueues should be brought up before normal priority CPU notifiers. | 4644 | * Workqueues should be brought up before normal priority CPU notifiers. |
4645 | * This will be registered high priority CPU notifier. | 4645 | * This will be registered high priority CPU notifier. |
4646 | */ | 4646 | */ |
4647 | static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, | 4647 | static int workqueue_cpu_up_callback(struct notifier_block *nfb, |
4648 | unsigned long action, | 4648 | unsigned long action, |
4649 | void *hcpu) | 4649 | void *hcpu) |
4650 | { | 4650 | { |
@@ -4697,7 +4697,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb, | |||
4697 | * Workqueues should be brought down after normal priority CPU notifiers. | 4697 | * Workqueues should be brought down after normal priority CPU notifiers. |
4698 | * This will be registered as low priority CPU notifier. | 4698 | * This will be registered as low priority CPU notifier. |
4699 | */ | 4699 | */ |
4700 | static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb, | 4700 | static int workqueue_cpu_down_callback(struct notifier_block *nfb, |
4701 | unsigned long action, | 4701 | unsigned long action, |
4702 | void *hcpu) | 4702 | void *hcpu) |
4703 | { | 4703 | { |