 arch/s390/kernel/kprobes.c         |   6
 drivers/char/hw_random/intel-rng.c |   6
 include/linux/stop_machine.h       |  50
 kernel/cpu.c                       |  16
 kernel/module.c                    |  33
 kernel/rcuclassic.c                |   4
 kernel/stop_machine.c              | 288
 kernel/trace/ftrace.c              |   6
 mm/page_alloc.c                    |   4
 9 files changed, 199 insertions(+), 214 deletions(-)
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 4f82e5b5f879..569079ec4ff0 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -197,7 +197,7 @@ void __kprobes arch_arm_kprobe(struct kprobe *p)
 	args.new = BREAKPOINT_INSTRUCTION;
 
 	kcb->kprobe_status = KPROBE_SWAP_INST;
-	stop_machine_run(swap_instruction, &args, NR_CPUS);
+	stop_machine(swap_instruction, &args, NULL);
 	kcb->kprobe_status = status;
 }
 
@@ -212,7 +212,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 	args.new = p->opcode;
 
 	kcb->kprobe_status = KPROBE_SWAP_INST;
-	stop_machine_run(swap_instruction, &args, NR_CPUS);
+	stop_machine(swap_instruction, &args, NULL);
 	kcb->kprobe_status = status;
 }
 
@@ -331,7 +331,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
 		 * No kprobe at this address. The fault has not been
 		 * caused by a kprobe breakpoint. The race of breakpoint
		 * vs. kprobe remove does not exist because on s390 we
-		 * use stop_machine_run to arm/disarm the breakpoints.
+		 * use stop_machine to arm/disarm the breakpoints.
 		 */
 		goto no_kprobe;
 
diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
index 27fdc0866496..8a2fce0756ec 100644
--- a/drivers/char/hw_random/intel-rng.c
+++ b/drivers/char/hw_random/intel-rng.c
@@ -241,7 +241,7 @@ static int __init intel_rng_hw_init(void *_intel_rng_hw)
 	struct intel_rng_hw *intel_rng_hw = _intel_rng_hw;
 	u8 mfc, dvc;
 
-	/* interrupts disabled in stop_machine_run call */
+	/* interrupts disabled in stop_machine call */
 
 	if (!(intel_rng_hw->fwh_dec_en1_val & FWH_F8_EN_MASK))
 		pci_write_config_byte(intel_rng_hw->dev,
@@ -365,10 +365,10 @@ static int __init mod_init(void)
 	 * location with the Read ID command, all activity on the system
 	 * must be stopped until the state is back to normal.
 	 *
-	 * Use stop_machine_run because IPIs can be blocked by disabling
+	 * Use stop_machine because IPIs can be blocked by disabling
 	 * interrupts.
 	 */
-	err = stop_machine_run(intel_rng_hw_init, intel_rng_hw, NR_CPUS);
+	err = stop_machine(intel_rng_hw_init, intel_rng_hw, NULL);
 	pci_dev_put(dev);
 	iounmap(intel_rng_hw->mem);
 	kfree(intel_rng_hw);
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index 5bfc553bdb21..f1cb0ba6d715 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -5,41 +5,43 @@
    (and more). So the "read" side to such a lock is anything which
    diables preeempt. */
 #include <linux/cpu.h>
+#include <linux/cpumask.h>
 #include <asm/system.h>
 
 #if defined(CONFIG_STOP_MACHINE) && defined(CONFIG_SMP)
+
+/* Deprecated, but useful for transition. */
+#define ALL_CPUS ~0U
+
 /**
- * stop_machine_run: freeze the machine on all CPUs and run this function
+ * stop_machine: freeze the machine on all CPUs and run this function
  * @fn: the function to run
  * @data: the data ptr for the @fn()
- * @cpu: the cpu to run @fn() on (or any, if @cpu == NR_CPUS.
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
  *
- * Description: This causes a thread to be scheduled on every other cpu,
- * each of which disables interrupts, and finally interrupts are disabled
- * on the current CPU. The result is that noone is holding a spinlock
- * or inside any other preempt-disabled region when @fn() runs.
+ * Description: This causes a thread to be scheduled on every cpu,
+ * each of which disables interrupts. The result is that noone is
+ * holding a spinlock or inside any other preempt-disabled region when
+ * @fn() runs.
 *
 * This can be thought of as a very heavy write lock, equivalent to
 * grabbing every spinlock in the kernel. */
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu);
+int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
 
 /**
- * __stop_machine_run: freeze the machine on all CPUs and run this function
+ * __stop_machine: freeze the machine on all CPUs and run this function
  * @fn: the function to run
  * @data: the data ptr for the @fn
- * @cpu: the cpu to run @fn on (or any, if @cpu == NR_CPUS.
+ * @cpus: the cpus to run the @fn() on (NULL = any online cpu)
 *
- * Description: This is a special version of the above, which returns the
- * thread which has run @fn(): kthread_stop will return the return value
- * of @fn(). Used by hotplug cpu.
+ * Description: This is a special version of the above, which assumes cpus
+ * won't come or go while it's being called. Used by hotplug cpu.
 */
-struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
-				       unsigned int cpu);
-
+int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus);
 #else
 
-static inline int stop_machine_run(int (*fn)(void *), void *data,
-				   unsigned int cpu)
+static inline int stop_machine(int (*fn)(void *), void *data,
+			       const cpumask_t *cpus)
 {
 	int ret;
 	local_irq_disable();
@@ -48,4 +50,18 @@ static inline int stop_machine_run(int (*fn)(void *), void *data,
 	return ret;
 }
 #endif /* CONFIG_SMP */
+
+static inline int __deprecated stop_machine_run(int (*fn)(void *), void *data,
+						unsigned int cpu)
+{
+	/* If they don't care which cpu fn runs on, just pick one. */
+	if (cpu == NR_CPUS)
+		return stop_machine(fn, data, NULL);
+	else if (cpu == ~0U)
+		return stop_machine(fn, data, &cpu_possible_map);
+	else {
+		cpumask_t cpus = cpumask_of_cpu(cpu);
+		return stop_machine(fn, data, &cpus);
+	}
+}
 #endif /* _LINUX_STOP_MACHINE */
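
Usage note (not part of the patch): the header above is the whole of the new interface. Callers that used to pass a CPU number now pass a cpumask pointer, with NULL meaning "any one online CPU", and the old entry point survives only as the __deprecated inline wrapper shown at the bottom of the file. A rough sketch of how a call site migrates; the function names below are hypothetical, only the stop_machine()/stop_machine_run() signatures come from the patch:

#include <linux/stop_machine.h>
#include <linux/cpumask.h>

/* Hypothetical callback: runs while every online CPU spins with IRQs off. */
static int do_update(void *data)
{
	/* patch text, swap a pointer, rewrite a table, ... */
	return 0;
}

static int example_caller(void)
{
	int err;

	/* Old style, now marked __deprecated:
	 *	err = stop_machine_run(do_update, NULL, NR_CPUS);
	 */

	/* New style: a NULL cpumask means "run do_update() on any one CPU". */
	err = stop_machine(do_update, NULL, NULL);

	return err;
}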
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 10ba5f1004a5..29510d68338a 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -216,7 +216,6 @@ static int __ref take_cpu_down(void *_param)
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
 	int err, nr_calls = 0;
-	struct task_struct *p;
 	cpumask_t old_allowed, tmp;
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
@@ -249,21 +248,18 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	cpus_setall(tmp);
 	cpu_clear(cpu, tmp);
 	set_cpus_allowed_ptr(current, &tmp);
+	tmp = cpumask_of_cpu(cpu);
 
-	p = __stop_machine_run(take_cpu_down, &tcd_param, cpu);
-
-	if (IS_ERR(p) || cpu_online(cpu)) {
+	err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
+	if (err) {
 		/* CPU didn't die: tell everyone. Can't complain. */
 		if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					    hcpu) == NOTIFY_BAD)
 			BUG();
 
-		if (IS_ERR(p)) {
-			err = PTR_ERR(p);
-			goto out_allowed;
-		}
-		goto out_thread;
+		goto out_allowed;
 	}
+	BUG_ON(cpu_online(cpu));
 
 	/* Wait for it to sleep (leaving idle task). */
 	while (!idle_cpu(cpu))
@@ -279,8 +275,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 
 	check_for_tasks(cpu);
 
-out_thread:
-	err = kthread_stop(p);
 out_allowed:
 	set_cpus_allowed_ptr(current, &old_allowed);
 out_release:
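
The _cpu_down() hunk above shows the other calling pattern: when @fn must run on one specific CPU (here the CPU being taken down), the caller now builds a single-bit cpumask instead of passing a CPU number. A sketch of that pattern in isolation; the wrapper name is made up for illustration and is not part of the patch:

#include <linux/stop_machine.h>
#include <linux/cpumask.h>

/* Hypothetical helper: run fn on exactly one CPU while the rest spin. */
static int stop_machine_on_cpu(int (*fn)(void *), void *data, unsigned int cpu)
{
	cpumask_t mask = cpumask_of_cpu(cpu);	/* one-bit mask for "cpu" */

	/* __stop_machine() assumes the caller already holds the hotplug lock,
	 * as _cpu_down() does; otherwise call stop_machine(), which takes
	 * get_online_cpus()/put_online_cpus() itself. */
	return __stop_machine(fn, data, &mask);
}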
diff --git a/kernel/module.c b/kernel/module.c
index d8b5605132a0..61d212120df4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -325,18 +325,6 @@ static unsigned long find_symbol(const char *name,
 	return -ENOENT;
 }
 
-/* lookup symbol in given range of kernel_symbols */
-static const struct kernel_symbol *lookup_symbol(const char *name,
-	const struct kernel_symbol *start,
-	const struct kernel_symbol *stop)
-{
-	const struct kernel_symbol *ks = start;
-	for (; ks < stop; ks++)
-		if (strcmp(ks->name, name) == 0)
-			return ks;
-	return NULL;
-}
-
 /* Search for module by name: must hold module_mutex. */
 static struct module *find_module(const char *name)
 {
@@ -690,7 +678,7 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
 	if (flags & O_NONBLOCK) {
 		struct stopref sref = { mod, flags, forced };
 
-		return stop_machine_run(__try_stop_module, &sref, NR_CPUS);
+		return stop_machine(__try_stop_module, &sref, NULL);
 	} else {
 		/* We don't need to stop the machine for this. */
 		mod->state = MODULE_STATE_GOING;
@@ -1428,7 +1416,7 @@ static int __unlink_module(void *_mod)
 static void free_module(struct module *mod)
 {
 	/* Delete from various lists */
-	stop_machine_run(__unlink_module, mod, NR_CPUS);
+	stop_machine(__unlink_module, mod, NULL);
 	remove_notes_attrs(mod);
 	remove_sect_attrs(mod);
 	mod_kobject_remove(mod);
@@ -1703,6 +1691,19 @@ static void setup_modinfo(struct module *mod, Elf_Shdr *sechdrs,
 }
 
 #ifdef CONFIG_KALLSYMS
+
+/* lookup symbol in given range of kernel_symbols */
+static const struct kernel_symbol *lookup_symbol(const char *name,
+	const struct kernel_symbol *start,
+	const struct kernel_symbol *stop)
+{
+	const struct kernel_symbol *ks = start;
+	for (; ks < stop; ks++)
+		if (strcmp(ks->name, name) == 0)
+			return ks;
+	return NULL;
+}
+
 static int is_exported(const char *name, const struct module *mod)
 {
 	if (!mod && lookup_symbol(name, __start___ksymtab, __stop___ksymtab))
@@ -2196,7 +2197,7 @@ static struct module *load_module(void __user *umod,
 	/* Now sew it into the lists so we can get lockdep and oops
 	 * info during argument parsing. Noone should access us, since
 	 * strong_try_module_get() will fail. */
-	stop_machine_run(__link_module, mod, NR_CPUS);
+	stop_machine(__link_module, mod, NULL);
 
 	/* Size of section 0 is 0, so this works well if no params */
 	err = parse_args(mod->name, mod->args,
@@ -2230,7 +2231,7 @@ static struct module *load_module(void __user *umod,
 	return mod;
 
  unlink:
-	stop_machine_run(__unlink_module, mod, NR_CPUS);
+	stop_machine(__unlink_module, mod, NULL);
 	module_arch_cleanup(mod);
 cleanup:
 	kobject_del(&mod->mkobj.kobj);
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index 6f8696c502f4..aad93cdc9f68 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -91,8 +91,8 @@ static void force_quiescent_state(struct rcu_data *rdp,
 	 * rdp->cpu is the current cpu.
 	 *
 	 * cpu_online_map is updated by the _cpu_down()
-	 * using stop_machine_run(). Since we're in irqs disabled
-	 * section, stop_machine_run() is not exectuting, hence
+	 * using __stop_machine(). Since we're in irqs disabled
+	 * section, __stop_machine() is not exectuting, hence
 	 * the cpu_online_map is stable.
 	 *
 	 * However, a cpu might have been offlined _just_ before
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 738b411ff2d3..e446c7c7d6a9 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -1,4 +1,4 @@
-/* Copyright 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
+/* Copyright 2008, 2005 Rusty Russell rusty@rustcorp.com.au IBM Corporation.
 * GPL v2 and any later version.
 */
 #include <linux/cpu.h>
@@ -13,204 +13,178 @@
 #include <asm/atomic.h>
 #include <asm/uaccess.h>
 
-/* Since we effect priority and affinity (both of which are visible
- * to, and settable by outside processes) we do indirection via a
- * kthread. */
-
-/* Thread to stop each CPU in user context. */
+/* This controls the threads on each CPU. */
 enum stopmachine_state {
-	STOPMACHINE_WAIT,
+	/* Dummy starting state for thread. */
+	STOPMACHINE_NONE,
+	/* Awaiting everyone to be scheduled. */
 	STOPMACHINE_PREPARE,
+	/* Disable interrupts. */
 	STOPMACHINE_DISABLE_IRQ,
+	/* Run the function */
+	STOPMACHINE_RUN,
+	/* Exit */
 	STOPMACHINE_EXIT,
 };
+static enum stopmachine_state state;
 
-static enum stopmachine_state stopmachine_state;
-static unsigned int stopmachine_num_threads;
-static atomic_t stopmachine_thread_ack;
-
-static int stopmachine(void *cpu)
-{
-	int irqs_disabled = 0;
-	int prepared = 0;
-	cpumask_of_cpu_ptr(cpumask, (int)(long)cpu);
-
-	set_cpus_allowed_ptr(current, cpumask);
-
-	/* Ack: we are alive */
-	smp_mb(); /* Theoretically the ack = 0 might not be on this CPU yet. */
-	atomic_inc(&stopmachine_thread_ack);
-
-	/* Simple state machine */
-	while (stopmachine_state != STOPMACHINE_EXIT) {
-		if (stopmachine_state == STOPMACHINE_DISABLE_IRQ
-		    && !irqs_disabled) {
-			local_irq_disable();
-			hard_irq_disable();
-			irqs_disabled = 1;
-			/* Ack: irqs disabled. */
-			smp_mb(); /* Must read state first. */
-			atomic_inc(&stopmachine_thread_ack);
-		} else if (stopmachine_state == STOPMACHINE_PREPARE
-			   && !prepared) {
-			/* Everyone is in place, hold CPU. */
-			preempt_disable();
-			prepared = 1;
-			smp_mb(); /* Must read state first. */
-			atomic_inc(&stopmachine_thread_ack);
-		}
-		/* Yield in first stage: migration threads need to
-		 * help our sisters onto their CPUs. */
-		if (!prepared && !irqs_disabled)
-			yield();
-		cpu_relax();
-	}
-
-	/* Ack: we are exiting. */
-	smp_mb(); /* Must read state first. */
-	atomic_inc(&stopmachine_thread_ack);
-
-	if (irqs_disabled)
-		local_irq_enable();
-	if (prepared)
-		preempt_enable();
+struct stop_machine_data {
+	int (*fn)(void *);
+	void *data;
+	int fnret;
+};
 
-	return 0;
-}
+/* Like num_online_cpus(), but hotplug cpu uses us, so we need this. */
+static unsigned int num_threads;
+static atomic_t thread_ack;
+static struct completion finished;
+static DEFINE_MUTEX(lock);
 
-/* Change the thread state */
-static void stopmachine_set_state(enum stopmachine_state state)
+static void set_state(enum stopmachine_state newstate)
 {
-	atomic_set(&stopmachine_thread_ack, 0);
+	/* Reset ack counter. */
+	atomic_set(&thread_ack, num_threads);
 	smp_wmb();
-	stopmachine_state = state;
-	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
-		cpu_relax();
+	state = newstate;
 }
 
-static int stop_machine(void)
+/* Last one to ack a state moves to the next state. */
+static void ack_state(void)
 {
-	int i, ret = 0;
-
-	atomic_set(&stopmachine_thread_ack, 0);
-	stopmachine_num_threads = 0;
-	stopmachine_state = STOPMACHINE_WAIT;
-
-	for_each_online_cpu(i) {
-		if (i == raw_smp_processor_id())
-			continue;
-		ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
-		if (ret < 0)
-			break;
-		stopmachine_num_threads++;
-	}
-
-	/* Wait for them all to come to life. */
-	while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
-		yield();
-		cpu_relax();
+	if (atomic_dec_and_test(&thread_ack)) {
+		/* If we're the last one to ack the EXIT, we're finished. */
+		if (state == STOPMACHINE_EXIT)
+			complete(&finished);
+		else
+			set_state(state + 1);
 	}
+}
 
-	/* If some failed, kill them all. */
-	if (ret < 0) {
-		stopmachine_set_state(STOPMACHINE_EXIT);
-		return ret;
-	}
+/* This is the actual thread which stops the CPU. It exits by itself rather
+ * than waiting for kthread_stop(), because it's easier for hotplug CPU. */
+static int stop_cpu(struct stop_machine_data *smdata)
+{
+	enum stopmachine_state curstate = STOPMACHINE_NONE;
+	int uninitialized_var(ret);
 
-	/* Now they are all started, make them hold the CPUs, ready. */
-	preempt_disable();
-	stopmachine_set_state(STOPMACHINE_PREPARE);
+	/* Simple state machine */
+	do {
+		/* Chill out and ensure we re-read stopmachine_state. */
+		cpu_relax();
+		if (state != curstate) {
+			curstate = state;
+			switch (curstate) {
+			case STOPMACHINE_DISABLE_IRQ:
+				local_irq_disable();
+				hard_irq_disable();
+				break;
+			case STOPMACHINE_RUN:
+				/* |= allows error detection if functions on
+				 * multiple CPUs. */
+				smdata->fnret |= smdata->fn(smdata->data);
+				break;
+			default:
+				break;
+			}
+			ack_state();
+		}
+	} while (curstate != STOPMACHINE_EXIT);
 
-	/* Make them disable irqs. */
-	local_irq_disable();
-	hard_irq_disable();
-	stopmachine_set_state(STOPMACHINE_DISABLE_IRQ);
+	local_irq_enable();
+	do_exit(0);
+}
 
+/* Callback for CPUs which aren't supposed to do anything. */
+static int chill(void *unused)
+{
 	return 0;
 }
 
-static void restart_machine(void)
+int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
-	stopmachine_set_state(STOPMACHINE_EXIT);
-	local_irq_enable();
-	preempt_enable_no_resched();
-}
+	int i, err;
+	struct stop_machine_data active, idle;
+	struct task_struct **threads;
+
+	active.fn = fn;
+	active.data = data;
+	active.fnret = 0;
+	idle.fn = chill;
+	idle.data = NULL;
+
+	/* This could be too big for stack on large machines. */
+	threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
+	if (!threads)
+		return -ENOMEM;
+
+	/* Set up initial state. */
+	mutex_lock(&lock);
+	init_completion(&finished);
+	num_threads = num_online_cpus();
+	set_state(STOPMACHINE_PREPARE);
 
-struct stop_machine_data {
-	int (*fn)(void *);
-	void *data;
-	struct completion done;
-};
+	for_each_online_cpu(i) {
+		struct stop_machine_data *smdata = &idle;
+		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 
-static int do_stop(void *_smdata)
-{
-	struct stop_machine_data *smdata = _smdata;
-	int ret;
+		if (!cpus) {
+			if (i == first_cpu(cpu_online_map))
+				smdata = &active;
+		} else {
+			if (cpu_isset(i, *cpus))
+				smdata = &active;
+		}
 
-	ret = stop_machine();
-	if (ret == 0) {
-		ret = smdata->fn(smdata->data);
-		restart_machine();
-	}
+		threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
+					    i);
+		if (IS_ERR(threads[i])) {
+			err = PTR_ERR(threads[i]);
+			threads[i] = NULL;
+			goto kill_threads;
+		}
 
-	/* We're done: you can kthread_stop us now */
-	complete(&smdata->done);
+		/* Place it onto correct cpu. */
+		kthread_bind(threads[i], i);
 
-	/* Wait for kthread_stop */
-	set_current_state(TASK_INTERRUPTIBLE);
-	while (!kthread_should_stop()) {
-		schedule();
-		set_current_state(TASK_INTERRUPTIBLE);
+		/* Make it highest prio. */
+		if (sched_setscheduler_nocheck(threads[i], SCHED_FIFO, &param))
+			BUG();
 	}
-	__set_current_state(TASK_RUNNING);
-	return ret;
-}
 
-struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
-				       unsigned int cpu)
-{
-	static DEFINE_MUTEX(stopmachine_mutex);
-	struct stop_machine_data smdata;
-	struct task_struct *p;
+	/* We've created all the threads. Wake them all: hold this CPU so one
+	 * doesn't hit this CPU until we're ready. */
+	get_cpu();
+	for_each_online_cpu(i)
+		wake_up_process(threads[i]);
 
-	smdata.fn = fn;
-	smdata.data = data;
-	init_completion(&smdata.done);
+	/* This will release the thread on our CPU. */
+	put_cpu();
+	wait_for_completion(&finished);
+	mutex_unlock(&lock);
 
-	mutex_lock(&stopmachine_mutex);
+	kfree(threads);
 
-	/* If they don't care which CPU fn runs on, bind to any online one. */
-	if (cpu == NR_CPUS)
-		cpu = raw_smp_processor_id();
+	return active.fnret;
 
-	p = kthread_create(do_stop, &smdata, "kstopmachine");
-	if (!IS_ERR(p)) {
-		struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+kill_threads:
+	for_each_online_cpu(i)
+		if (threads[i])
+			kthread_stop(threads[i]);
+	mutex_unlock(&lock);
 
-		/* One high-prio thread per cpu. We'll do this one. */
-		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-		kthread_bind(p, cpu);
-		wake_up_process(p);
-		wait_for_completion(&smdata.done);
-	}
-	mutex_unlock(&stopmachine_mutex);
-	return p;
+	kfree(threads);
+	return err;
 }
 
-int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
+int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
 {
-	struct task_struct *p;
 	int ret;
 
 	/* No CPUs can come up or down during this. */
 	get_online_cpus();
-	p = __stop_machine_run(fn, data, cpu);
-	if (!IS_ERR(p))
-		ret = kthread_stop(p);
-	else
-		ret = PTR_ERR(p);
+	ret = __stop_machine(fn, data, cpus);
 	put_online_cpus();
 
 	return ret;
 }
-EXPORT_SYMBOL_GPL(stop_machine_run);
+EXPORT_SYMBOL_GPL(stop_machine);
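
The rewrite above replaces the old one-coordinator handshake (stopmachine_set_state() re-arming a global ack counter at every step) with a self-advancing state machine: set_state() arms the counter to the number of threads, and the last thread to ack_state() a stage moves everyone to the next stage, so once the kstop threads are running no coordinator is needed and the result is collected through a completion. Below is a rough userspace model of just that barrier logic, written with pthreads and C11 atomics; the structure mirrors the kernel code but all of it is illustrative and none of it is part of the patch:

/* Userspace model of the stop_machine ack/advance state machine. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum stopmachine_state { STOPMACHINE_NONE, STOPMACHINE_PREPARE,
			 STOPMACHINE_DISABLE_IRQ, STOPMACHINE_RUN,
			 STOPMACHINE_EXIT };

#define NTHREADS 4

static _Atomic enum stopmachine_state state = STOPMACHINE_NONE;
static atomic_int thread_ack;

static void set_state(enum stopmachine_state newstate)
{
	atomic_store(&thread_ack, NTHREADS);	/* re-arm the ack counter */
	atomic_store(&state, newstate);		/* then publish the new stage */
}

/* Last thread to ack the current stage advances everyone to the next one. */
static void ack_state(void)
{
	if (atomic_fetch_sub(&thread_ack, 1) == 1 &&
	    atomic_load(&state) != STOPMACHINE_EXIT)
		set_state(atomic_load(&state) + 1);
}

static void *stop_cpu(void *arg)
{
	long id = (long)arg;
	enum stopmachine_state curstate = STOPMACHINE_NONE;

	do {
		if (atomic_load(&state) != curstate) {
			curstate = atomic_load(&state);
			/* Only the "active" thread runs fn, like the kernel's
			 * active/idle split; here thread 0 plays that role. */
			if (curstate == STOPMACHINE_RUN && id == 0)
				printf("thread %ld: running fn\n", id);
			ack_state();
		}
	} while (curstate != STOPMACHINE_EXIT);
	return NULL;
}

int main(void)
{
	pthread_t t[NTHREADS];
	long i;

	set_state(STOPMACHINE_PREPARE);	/* threads walk PREPARE..EXIT */
	for (i = 0; i < NTHREADS; i++)
		pthread_create(&t[i], NULL, stop_cpu, (void *)i);
	for (i = 0; i < NTHREADS; i++)
		pthread_join(t[i], NULL);
	return 0;
}

Built with cc -pthread, it prints a single "running fn" line only after all four threads have acked PREPARE and DISABLE_IRQ, which is the ordering guarantee the kernel version relies on before calling @fn with interrupts off everywhere.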
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4231a3dc224a..f6e3af31b403 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -587,7 +587,7 @@ static int __ftrace_modify_code(void *data)
 
 static void ftrace_run_update_code(int command)
 {
-	stop_machine_run(__ftrace_modify_code, &command, NR_CPUS);
+	stop_machine(__ftrace_modify_code, &command, NULL);
 }
 
 void ftrace_disable_daemon(void)
@@ -787,7 +787,7 @@ static int ftrace_update_code(void)
 	    !ftrace_enabled || !ftraced_trigger)
 		return 0;
 
-	stop_machine_run(__ftrace_update_code, NULL, NR_CPUS);
+	stop_machine(__ftrace_update_code, NULL, NULL);
 
 	return 1;
 }
@@ -1564,7 +1564,7 @@ static int __init ftrace_dynamic_init(void)
 
 	addr = (unsigned long)ftrace_record_ip;
 
-	stop_machine_run(ftrace_dyn_arch_init, &addr, NR_CPUS);
+	stop_machine(ftrace_dyn_arch_init, &addr, NULL);
 
 	/* ftrace_dyn_arch_init places the return code in addr */
 	if (addr) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6da667274df5..3cf3d05b6bd4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2372,7 +2372,7 @@ static void build_zonelist_cache(pg_data_t *pgdat)
 
 #endif	/* CONFIG_NUMA */
 
-/* return values int ....just for stop_machine_run() */
+/* return values int ....just for stop_machine() */
 static int __build_all_zonelists(void *dummy)
 {
 	int nid;
@@ -2397,7 +2397,7 @@ void build_all_zonelists(void)
 	} else {
 		/* we have to stop all cpus to guarantee there is no user
 		   of zonelist */
-		stop_machine_run(__build_all_zonelists, NULL, NR_CPUS);
+		stop_machine(__build_all_zonelists, NULL, NULL);
 		/* cpuset refresh routine should be here */
 	}
 	vm_total_pages = nr_free_pagecache_pages();