about summary refs log tree commit diff stats
path: root/kernel
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2008-07-28 13:16:30 -0400
committerRusty Russell <rusty@rustcorp.com.au>2008-07-27 22:16:30 -0400
commiteeec4fad963490821348a331cca6102ae1c4a7a3 (patch)
tree163a7d9414d719fccac096d1ba822416f705b397 /kernel
parent04321587584272f4e8b9818f319f40caf8eeee13 (diff)
stop_machine(): stop_machine_run() changed to use cpu mask
Instead of a "cpu" arg with magic values NR_CPUS (any cpu) and ~0 (all cpus), pass a cpumask_t. Allow NULL for the common case (where we don't care which CPU the function is run on): temporary cpumask_t's are usually considered bad for stack space. This deprecates stop_machine_run, to be removed soon when all the callers are dead. Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Diffstat (limited to 'kernel')
-rw-r--r--kernel/cpu.c3
-rw-r--r--kernel/stop_machine.c27
2 files changed, 15 insertions, 15 deletions
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 53cf508f975..29510d68338 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -248,8 +248,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
248 cpus_setall(tmp); 248 cpus_setall(tmp);
249 cpu_clear(cpu, tmp); 249 cpu_clear(cpu, tmp);
250 set_cpus_allowed_ptr(current, &tmp); 250 set_cpus_allowed_ptr(current, &tmp);
251 tmp = cpumask_of_cpu(cpu);
251 252
252 err = __stop_machine_run(take_cpu_down, &tcd_param, cpu); 253 err = __stop_machine(take_cpu_down, &tcd_param, &tmp);
253 if (err) { 254 if (err) {
254 /* CPU didn't die: tell everyone. Can't complain. */ 255 /* CPU didn't die: tell everyone. Can't complain. */
255 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod, 256 if (raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 35882dccc94..e446c7c7d6a 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -100,7 +100,7 @@ static int chill(void *unused)
100 return 0; 100 return 0;
101} 101}
102 102
103int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) 103int __stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
104{ 104{
105 int i, err; 105 int i, err;
106 struct stop_machine_data active, idle; 106 struct stop_machine_data active, idle;
@@ -112,10 +112,6 @@ int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
112 idle.fn = chill; 112 idle.fn = chill;
113 idle.data = NULL; 113 idle.data = NULL;
114 114
115 /* If they don't care which cpu fn runs on, just pick one. */
116 if (cpu == NR_CPUS)
117 cpu = any_online_cpu(cpu_online_map);
118
119 /* This could be too big for stack on large machines. */ 115 /* This could be too big for stack on large machines. */
120 threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL); 116 threads = kcalloc(NR_CPUS, sizeof(threads[0]), GFP_KERNEL);
121 if (!threads) 117 if (!threads)
@@ -128,13 +124,16 @@ int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
128 set_state(STOPMACHINE_PREPARE); 124 set_state(STOPMACHINE_PREPARE);
129 125
130 for_each_online_cpu(i) { 126 for_each_online_cpu(i) {
131 struct stop_machine_data *smdata; 127 struct stop_machine_data *smdata = &idle;
132 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; 128 struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
133 129
134 if (cpu == ALL_CPUS || i == cpu) 130 if (!cpus) {
135 smdata = &active; 131 if (i == first_cpu(cpu_online_map))
136 else 132 smdata = &active;
137 smdata = &idle; 133 } else {
134 if (cpu_isset(i, *cpus))
135 smdata = &active;
136 }
138 137
139 threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u", 138 threads[i] = kthread_create((void *)stop_cpu, smdata, "kstop%u",
140 i); 139 i);
@@ -154,7 +153,7 @@ int __stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu)
154 153
155 /* We've created all the threads. Wake them all: hold this CPU so one 154 /* We've created all the threads. Wake them all: hold this CPU so one
156 * doesn't hit this CPU until we're ready. */ 155 * doesn't hit this CPU until we're ready. */
157 cpu = get_cpu(); 156 get_cpu();
158 for_each_online_cpu(i) 157 for_each_online_cpu(i)
159 wake_up_process(threads[i]); 158 wake_up_process(threads[i]);
160 159
@@ -177,15 +176,15 @@ kill_threads:
177 return err; 176 return err;
178} 177}
179 178
180int stop_machine_run(int (*fn)(void *), void *data, unsigned int cpu) 179int stop_machine(int (*fn)(void *), void *data, const cpumask_t *cpus)
181{ 180{
182 int ret; 181 int ret;
183 182
184 /* No CPUs can come up or down during this. */ 183 /* No CPUs can come up or down during this. */
185 get_online_cpus(); 184 get_online_cpus();
186 ret = __stop_machine_run(fn, data, cpu); 185 ret = __stop_machine(fn, data, cpus);
187 put_online_cpus(); 186 put_online_cpus();
188 187
189 return ret; 188 return ret;
190} 189}
191EXPORT_SYMBOL_GPL(stop_machine_run); 190EXPORT_SYMBOL_GPL(stop_machine);