author    | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400
commit    | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree      | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/arm/kernel/smp.c
tags      | Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/arm/kernel/smp.c')
-rw-r--r-- | arch/arm/kernel/smp.c | 396
1 file changed, 396 insertions, 0 deletions
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
new file mode 100644
index 000000000000..ecc8c3332408
--- /dev/null
+++ b/arch/arm/kernel/smp.c
@@ -0,0 +1,396 @@
/*
 *  linux/arch/arm/kernel/smp.c
 *
 *  Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/config.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cache.h>
#include <linux/profile.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/seq_file.h>

#include <asm/atomic.h>
#include <asm/cacheflush.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/ptrace.h>

/*
 * bitmask of present and online CPUs.
 * The present bitmask indicates that the CPU is physically present.
 * The online bitmask indicates that the CPU is up and running.
 */
cpumask_t cpu_present_mask;
cpumask_t cpu_online_map;

/*
 * structures for inter-processor calls
 * - A collection of single bit ipi messages.
 */
struct ipi_data {
        spinlock_t lock;
        unsigned long ipi_count;
        unsigned long bits;
};

static DEFINE_PER_CPU(struct ipi_data, ipi_data) = {
        .lock   = SPIN_LOCK_UNLOCKED,
};

enum ipi_msg_type {
        IPI_TIMER,
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
        IPI_CPU_STOP,
};

struct smp_call_struct {
        void (*func)(void *info);
        void *info;
        int wait;
        cpumask_t pending;
        cpumask_t unfinished;
};

static struct smp_call_struct * volatile smp_call_function_data;
static DEFINE_SPINLOCK(smp_call_function_lock);

int __init __cpu_up(unsigned int cpu)
{
        struct task_struct *idle;
        int ret;

        /*
         * Spawn a new process manually.  Grab a pointer to
         * its task struct so we can mess with it
         */
        idle = fork_idle(cpu);
        if (IS_ERR(idle)) {
                printk(KERN_ERR "CPU%u: fork() failed\n", cpu);
                return PTR_ERR(idle);
        }

        /*
         * Now bring the CPU into our world.
         */
        ret = boot_secondary(cpu, idle);
        if (ret) {
                printk(KERN_CRIT "cpu_up: processor %d failed to boot\n", cpu);
                /*
                 * FIXME: We need to clean up the new idle thread. --rmk
                 */
        }

        return ret;
}

/*
 * Called by both boot and secondaries to move global data into
 * per-processor storage.
 */
void __init smp_store_cpu_info(unsigned int cpuid)
{
        struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpuid);

        cpu_info->loops_per_jiffy = loops_per_jiffy;
}

void __init smp_cpus_done(unsigned int max_cpus)
{
        int cpu;
        unsigned long bogosum = 0;

        for_each_online_cpu(cpu)
                bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;

        printk(KERN_INFO "SMP: Total of %d processors activated "
               "(%lu.%02lu BogoMIPS).\n",
               num_online_cpus(),
               bogosum / (500000/HZ),
               (bogosum / (5000/HZ)) % 100);
}

void __init smp_prepare_boot_cpu(void)
{
        unsigned int cpu = smp_processor_id();

        cpu_set(cpu, cpu_present_mask);
        cpu_set(cpu, cpu_online_map);
}

static void send_ipi_message(cpumask_t callmap, enum ipi_msg_type msg)
{
        unsigned long flags;
        unsigned int cpu;

        local_irq_save(flags);

        for_each_cpu_mask(cpu, callmap) {
                struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

                spin_lock(&ipi->lock);
                ipi->bits |= 1 << msg;
                spin_unlock(&ipi->lock);
        }

        /*
         * Call the platform specific cross-CPU call function.
         */
        smp_cross_call(callmap);

        local_irq_restore(flags);
}

/*
 * You must not call this function with disabled interrupts, from a
 * hardware interrupt handler, nor from a bottom half handler.
 */
int smp_call_function_on_cpu(void (*func)(void *info), void *info, int retry,
                             int wait, cpumask_t callmap)
{
        struct smp_call_struct data;
        unsigned long timeout;
        int ret = 0;

        data.func = func;
        data.info = info;
        data.wait = wait;

        cpu_clear(smp_processor_id(), callmap);
        if (cpus_empty(callmap))
                goto out;

        data.pending = callmap;
        if (wait)
                data.unfinished = callmap;

        /*
         * try to get the mutex on smp_call_function_data
         */
        spin_lock(&smp_call_function_lock);
        smp_call_function_data = &data;

        send_ipi_message(callmap, IPI_CALL_FUNC);

        timeout = jiffies + HZ;
        while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
                barrier();

        /*
         * did we time out?
         */
        if (!cpus_empty(data.pending)) {
                /*
                 * this may be causing our panic - report it
                 */
                printk(KERN_CRIT
                       "CPU%u: smp_call_function timeout for %p(%p)\n"
                       "      callmap %lx pending %lx, %swait\n",
                       smp_processor_id(), func, info, callmap, data.pending,
                       wait ? "" : "no ");

                /*
                 * TRACE
                 */
                timeout = jiffies + (5 * HZ);
                while (!cpus_empty(data.pending) && time_before(jiffies, timeout))
                        barrier();

                if (cpus_empty(data.pending))
                        printk(KERN_CRIT "     RESOLVED\n");
                else
                        printk(KERN_CRIT "     STILL STUCK\n");
        }

        /*
         * whatever happened, we're done with the data, so release it
         */
        smp_call_function_data = NULL;
        spin_unlock(&smp_call_function_lock);

        if (!cpus_empty(data.pending)) {
                ret = -ETIMEDOUT;
                goto out;
        }

        if (wait)
                while (!cpus_empty(data.unfinished))
                        barrier();
 out:

        return 0;
}

int smp_call_function(void (*func)(void *info), void *info, int retry,
                      int wait)
{
        return smp_call_function_on_cpu(func, info, retry, wait,
                                        cpu_online_map);
}

void show_ipi_list(struct seq_file *p)
{
        unsigned int cpu;

        seq_puts(p, "IPI:");

        for_each_online_cpu(cpu)
                seq_printf(p, " %10lu", per_cpu(ipi_data, cpu).ipi_count);

        seq_putc(p, '\n');
}

static void ipi_timer(struct pt_regs *regs)
{
        int user = user_mode(regs);

        irq_enter();
        profile_tick(CPU_PROFILING, regs);
        update_process_times(user);
        irq_exit();
}

/*
 * ipi_call_function - handle IPI from smp_call_function()
 *
 * Note that we copy data out of the cross-call structure and then
 * let the caller know that we're here and have done with their data
 */
static void ipi_call_function(unsigned int cpu)
{
        struct smp_call_struct *data = smp_call_function_data;
        void (*func)(void *info) = data->func;
        void *info = data->info;
        int wait = data->wait;

        cpu_clear(cpu, data->pending);

        func(info);

        if (wait)
                cpu_clear(cpu, data->unfinished);
}

static DEFINE_SPINLOCK(stop_lock);

/*
 * ipi_cpu_stop - handle IPI from smp_send_stop()
 */
static void ipi_cpu_stop(unsigned int cpu)
{
        spin_lock(&stop_lock);
        printk(KERN_CRIT "CPU%u: stopping\n", cpu);
        dump_stack();
        spin_unlock(&stop_lock);

        cpu_clear(cpu, cpu_online_map);

        local_fiq_disable();
        local_irq_disable();

        while (1)
                cpu_relax();
}

/*
 * Main handler for inter-processor interrupts
 *
 * For ARM, the ipimask now only identifies a single
 * category of IPI (Bit 1 IPIs have been replaced by a
 * different mechanism):
 *
 *  Bit 0 - Inter-processor function call
 */
void do_IPI(struct pt_regs *regs)
{
        unsigned int cpu = smp_processor_id();
        struct ipi_data *ipi = &per_cpu(ipi_data, cpu);

        ipi->ipi_count++;

        for (;;) {
                unsigned long msgs;

                spin_lock(&ipi->lock);
                msgs = ipi->bits;
                ipi->bits = 0;
                spin_unlock(&ipi->lock);

                if (!msgs)
                        break;

                do {
                        unsigned nextmsg;

                        nextmsg = msgs & -msgs;
                        msgs &= ~nextmsg;
                        nextmsg = ffz(~nextmsg);

                        switch (nextmsg) {
                        case IPI_TIMER:
                                ipi_timer(regs);
                                break;

                        case IPI_RESCHEDULE:
                                /*
                                 * nothing more to do - everything is
                                 * done on the interrupt return path
                                 */
                                break;

                        case IPI_CALL_FUNC:
                                ipi_call_function(cpu);
                                break;

                        case IPI_CPU_STOP:
                                ipi_cpu_stop(cpu);
                                break;

                        default:
                                printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n",
                                       cpu, nextmsg);
                                break;
                        }
                } while (msgs);
        }
}

void smp_send_reschedule(int cpu)
{
        send_ipi_message(cpumask_of_cpu(cpu), IPI_RESCHEDULE);
}

void smp_send_timer(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        send_ipi_message(mask, IPI_TIMER);
}

void smp_send_stop(void)
{
        cpumask_t mask = cpu_online_map;
        cpu_clear(smp_processor_id(), mask);
        send_ipi_message(mask, IPI_CPU_STOP);
}

/*
 * not supported here
 */
int __init setup_profiling_timer(unsigned int multiplier)
{
        return -EINVAL;
}
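
For context, a minimal usage sketch (not part of the commit above) of how a caller might drive the cross-call API this file defines: smp_call_function() is invoked from process context with interrupts enabled, as the comment above smp_call_function_on_cpu() requires, and the callback runs on every other online CPU (the caller is removed from the map). The demo module and the say_hello() callback are hypothetical; only smp_call_function() and its four-argument form come from the file in this diff.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>

/* Runs on each remote CPU in IPI context: keep it short and non-blocking. */
static void say_hello(void *info)
{
        unsigned int cpu = smp_processor_id();

        printk(KERN_INFO "CPU%u: %s\n", cpu, (char *)info);
}

static int __init cross_call_demo_init(void)
{
        /*
         * Process context, interrupts enabled, so the restrictions noted
         * above smp_call_function_on_cpu() are satisfied.  The retry
         * argument is accepted but unused by this implementation;
         * wait=1 spins until every targeted CPU has cleared itself
         * from data.unfinished.
         */
        smp_call_function(say_hello, "hello from the calling CPU", 0, 1);
        return 0;
}

static void __exit cross_call_demo_exit(void)
{
}

module_init(cross_call_demo_init);
module_exit(cross_call_demo_exit);
MODULE_LICENSE("GPL");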