Diffstat (limited to 'kernel/smp.c')
-rw-r--r--  kernel/smp.c | 139
1 file changed, 65 insertions, 74 deletions
diff --git a/kernel/smp.c b/kernel/smp.c
index ffee35bef179..06d574e42c72 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -117,13 +117,43 @@ static void csd_unlock(struct call_single_data *csd)
 	csd->flags &= ~CSD_FLAG_LOCK;
 }
 
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
+
 /*
  * Insert a previously allocated call_single_data element
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
+static int generic_exec_single(int cpu, struct call_single_data *csd,
+			       smp_call_func_t func, void *info, int wait)
 {
+	struct call_single_data csd_stack = { .flags = 0 };
+	unsigned long flags;
+
+
+	if (cpu == smp_processor_id()) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+		return 0;
+	}
+
+
+	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
+		return -ENXIO;
+
+
+	if (!csd) {
+		csd = &csd_stack;
+		if (!wait)
+			csd = &__get_cpu_var(csd_data);
+	}
+
+	csd_lock(csd);
+
+	csd->func = func;
+	csd->info = info;
+
 	if (wait)
 		csd->flags |= CSD_FLAG_WAIT;
 
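With the consolidation above, the local-CPU fast path, the CPU-validity check, and the choice of csd all live in one helper: synchronous callers can pass csd == NULL and get the on-stack csd_stack (safe because they wait), while non-waiting internal callers fall back to the per-CPU csd_data. A minimal, hypothetical caller sketch of the error-return side; the callback name and message are illustrative, not part of this patch:

	/* Hypothetical callback; any smp_call_func_t-shaped function works. */
	static void flush_local_state(void *info)
	{
		/* Runs on the target CPU with interrupts disabled; must not block. */
	}

	int ret = smp_call_function_single(cpu, flush_local_state, NULL, 1);
	if (ret == -ENXIO)
		pr_warn("CPU %d is not online, skipping flush\n", cpu);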
@@ -143,6 +173,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 
 	if (wait)
 		csd_lock_wait(csd);
+
+	return 0;
 }
 
 /*
@@ -151,7 +183,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-	struct llist_node *entry, *next;
+	struct llist_node *entry;
+	struct call_single_data *csd, *csd_next;
 
 	/*
 	 * Shouldn't receive this interrupt on a cpu that is not yet online.
@@ -161,21 +194,12 @@ void generic_smp_call_function_single_interrupt(void)
 	entry = llist_del_all(&__get_cpu_var(call_single_queue));
 	entry = llist_reverse_order(entry);
 
-	while (entry) {
-		struct call_single_data *csd;
-
-		next = entry->next;
-
-		csd = llist_entry(entry, struct call_single_data, llist);
+	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
 		csd->func(csd->info);
 		csd_unlock(csd);
-
-		entry = next;
 	}
 }
 
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
-
 /*
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
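The open-coded walk is replaced by llist_for_each_entry_safe(), which is the _safe variant for the same reason the old loop cached next before calling the handler: once csd_unlock() runs, the owner may immediately reuse (and re-queue) the csd, so its llist pointer can no longer be trusted. A rough sketch of what the helper-based loop amounts to (not the macro's literal expansion):

	struct call_single_data *csd;
	struct llist_node *node, *next;

	for (node = entry; node; node = next) {
		next = node->next;	/* read before the csd can be recycled */
		csd = llist_entry(node, struct call_single_data, llist);
		csd->func(csd->info);
		csd_unlock(csd);	/* csd may be reused by its owner from here on */
	}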
@@ -187,12 +211,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 			     int wait)
 {
-	struct call_single_data d = {
-		.flags = 0,
-	};
-	unsigned long flags;
 	int this_cpu;
-	int err = 0;
+	int err;
 
 	/*
 	 * prevent preemption and reschedule on another processor,
@@ -209,32 +229,41 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 		     && !oops_in_progress);
 
-	if (cpu == this_cpu) {
-		local_irq_save(flags);
-		func(info);
-		local_irq_restore(flags);
-	} else {
-		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-			struct call_single_data *csd = &d;
+	err = generic_exec_single(cpu, NULL, func, info, wait);
 
-			if (!wait)
-				csd = &__get_cpu_var(csd_data);
+	put_cpu();
 
-			csd_lock(csd);
+	return err;
+}
+EXPORT_SYMBOL(smp_call_function_single);
 
-			csd->func = func;
-			csd->info = info;
-			generic_exec_single(cpu, csd, wait);
-		} else {
-			err = -ENXIO;	/* CPU not online */
-		}
-	}
+/**
+ * smp_call_function_single_async(): Run an asynchronous function on a
+ *			specific CPU.
+ * @cpu: The CPU to run on.
+ * @csd: Pre-allocated and setup data structure
+ *
+ * Like smp_call_function_single(), but the call is asynchonous and
+ * can thus be done from contexts with disabled interrupts.
+ *
+ * The caller passes his own pre-allocated data structure
+ * (ie: embedded in an object) and is responsible for synchronizing it
+ * such that the IPIs performed on the @csd are strictly serialized.
+ *
+ * NOTE: Be careful, there is unfortunately no current debugging facility to
+ * validate the correctness of this serialization.
+ */
+int smp_call_function_single_async(int cpu, struct call_single_data *csd)
+{
+	int err = 0;
 
-	put_cpu();
+	preempt_disable();
+	err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
+	preempt_enable();
 
 	return err;
 }
-EXPORT_SYMBOL(smp_call_function_single);
+EXPORT_SYMBOL_GPL(smp_call_function_single_async);
 
 /*
  * smp_call_function_any - Run a function on any of the given cpus
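As the new kerneldoc says, smp_call_function_single_async() is intended for a call_single_data embedded in a longer-lived object, with the caller serializing reuse of that csd. A hedged usage sketch; the structure, field, and function names below are made up for illustration and are not part of this patch:

	struct remote_work {
		struct call_single_data csd;	/* embedded, owned by the caller */
		int pending;
	};

	static void remote_work_fn(void *info)
	{
		struct remote_work *rw = info;

		/* Runs in IPI context on the target CPU; must not block. */
		rw->pending = 0;
	}

	static int kick_remote(int cpu, struct remote_work *rw)
	{
		rw->pending = 1;
		rw->csd.func = remote_work_fn;
		rw->csd.info = rw;
		/* Caller must ensure any previous IPI on this csd has completed. */
		return smp_call_function_single_async(cpu, &rw->csd);
	}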
@@ -280,44 +309,6 @@ call:
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
 /**
- * __smp_call_function_single(): Run a function on a specific CPU
- * @cpu: The CPU to run on.
- * @data: Pre-allocated and setup data structure
- * @wait: If true, wait until function has completed on specified CPU.
- *
- * Like smp_call_function_single(), but allow caller to pass in a
- * pre-allocated data structure. Useful for embedding @data inside
- * other structures, for instance.
- */
-void __smp_call_function_single(int cpu, struct call_single_data *csd,
-				int wait)
-{
-	unsigned int this_cpu;
-	unsigned long flags;
-
-	this_cpu = get_cpu();
-	/*
-	 * Can deadlock when called with interrupts disabled.
-	 * We allow cpu's that are not yet online though, as no one else can
-	 * send smp call function interrupt to this cpu and as such deadlocks
-	 * can't happen.
-	 */
-	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
-		     && !oops_in_progress);
-
-	if (cpu == this_cpu) {
-		local_irq_save(flags);
-		csd->func(csd->info);
-		local_irq_restore(flags);
-	} else {
-		csd_lock(csd);
-		generic_exec_single(cpu, csd, wait);
-	}
-	put_cpu();
-}
-EXPORT_SYMBOL_GPL(__smp_call_function_single);
-
-/**
  * smp_call_function_many(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on (only runs on online subset).
  * @func: The function to run. This must be fast and non-blocking.
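Since __smp_call_function_single() is removed above, callers with a pre-allocated csd map onto the remaining entry points. A hedged conversion sketch, with obj->csd standing in for whatever caller-owned csd was previously passed:

	/* Before (removed above): fire-and-forget with a pre-allocated csd. */
	__smp_call_function_single(cpu, &obj->csd, 0);

	/* After: same pre-allocated csd, explicit async entry point. */
	smp_call_function_single_async(cpu, &obj->csd);

	/* Before: the waiting variant with a caller-provided csd. */
	__smp_call_function_single(cpu, &obj->csd, 1);

	/* After: waiting callers typically go through the synchronous API instead. */
	smp_call_function_single(cpu, obj->csd.func, obj->csd.info, 1);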