Diffstat (limited to 'arch/blackfin/mach-common/smp.c')
-rw-r--r--  arch/blackfin/mach-common/smp.c  204
1 file changed, 79 insertions, 125 deletions
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index a17107a700d5..9f251406a76a 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -19,6 +19,7 @@
 #include <linux/mm.h>
 #include <linux/cpu.h>
 #include <linux/smp.h>
+#include <linux/cpumask.h>
 #include <linux/seq_file.h>
 #include <linux/irq.h>
 #include <linux/slab.h>
@@ -43,12 +44,6 @@ void __cpuinitdata *init_retx_coreb, *init_saved_retx_coreb,
 	*init_saved_seqstat_coreb, *init_saved_icplb_fault_addr_coreb,
 	*init_saved_dcplb_fault_addr_coreb;
 
-cpumask_t cpu_possible_map;
-EXPORT_SYMBOL(cpu_possible_map);
-
-cpumask_t cpu_online_map;
-EXPORT_SYMBOL(cpu_online_map);
-
 #define BFIN_IPI_RESCHEDULE 0
 #define BFIN_IPI_CALL_FUNC 1
 #define BFIN_IPI_CPU_STOP 2
@@ -65,8 +60,7 @@ struct smp_call_struct {
 	void (*func)(void *info);
 	void *info;
 	int wait;
-	cpumask_t pending;
-	cpumask_t waitmask;
+	cpumask_t *waitmask;
 };
 
 static struct blackfin_flush_data smp_flush_data;
@@ -74,15 +68,19 @@ static struct blackfin_flush_data smp_flush_data;
 static DEFINE_SPINLOCK(stop_lock);
 
 struct ipi_message {
-	struct list_head list;
 	unsigned long type;
 	struct smp_call_struct call_struct;
 };
 
+/* A magic number - stress test shows this is safe for common cases */
+#define BFIN_IPI_MSGQ_LEN 5
+
+/* Simple FIFO buffer, overflow leads to panic */
 struct ipi_message_queue {
-	struct list_head head;
 	spinlock_t lock;
 	unsigned long count;
+	unsigned long head; /* head of the queue */
+	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
 };
 
 static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
@@ -121,7 +119,6 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
 	func = msg->call_struct.func;
 	info = msg->call_struct.info;
 	wait = msg->call_struct.wait;
-	cpu_clear(cpu, msg->call_struct.pending);
 	func(info);
 	if (wait) {
 #ifdef __ARCH_SYNC_CORE_DCACHE
@@ -132,51 +129,57 @@ static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
 		 */
 		resync_core_dcache();
 #endif
-		cpu_clear(cpu, msg->call_struct.waitmask);
-	} else
-		kfree(msg);
+		cpu_clear(cpu, *msg->call_struct.waitmask);
+	}
+}
+
+/* Use IRQ_SUPPLE_0 to request reschedule.
+ * When returning from interrupt to user space,
+ * there is chance to reschedule */
+static irqreturn_t ipi_handler_int0(int irq, void *dev_instance)
+{
+	unsigned int cpu = smp_processor_id();
+
+	platform_clear_ipi(cpu, IRQ_SUPPLE_0);
+	return IRQ_HANDLED;
 }
 
-static irqreturn_t ipi_handler(int irq, void *dev_instance)
+static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
 	struct ipi_message *msg;
 	struct ipi_message_queue *msg_queue;
 	unsigned int cpu = smp_processor_id();
+	unsigned long flags;
 
-	platform_clear_ipi(cpu);
+	platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
 	msg_queue = &__get_cpu_var(ipi_msg_queue);
-	msg_queue->count++;
 
-	spin_lock(&msg_queue->lock);
-	while (!list_empty(&msg_queue->head)) {
-		msg = list_entry(msg_queue->head.next, typeof(*msg), list);
-		list_del(&msg->list);
+	spin_lock_irqsave(&msg_queue->lock, flags);
+
+	while (msg_queue->count) {
+		msg = &msg_queue->ipi_message[msg_queue->head];
 		switch (msg->type) {
-		case BFIN_IPI_RESCHEDULE:
-			/* That's the easiest one; leave it to
-			 * return_from_int. */
-			kfree(msg);
-			break;
 		case BFIN_IPI_CALL_FUNC:
-			spin_unlock(&msg_queue->lock);
+			spin_unlock_irqrestore(&msg_queue->lock, flags);
 			ipi_call_function(cpu, msg);
-			spin_lock(&msg_queue->lock);
+			spin_lock_irqsave(&msg_queue->lock, flags);
 			break;
 		case BFIN_IPI_CPU_STOP:
-			spin_unlock(&msg_queue->lock);
+			spin_unlock_irqrestore(&msg_queue->lock, flags);
 			ipi_cpu_stop(cpu);
-			spin_lock(&msg_queue->lock);
-			kfree(msg);
+			spin_lock_irqsave(&msg_queue->lock, flags);
 			break;
 		default:
 			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
 			       cpu, msg->type);
-			kfree(msg);
 			break;
 		}
+		msg_queue->head++;
+		msg_queue->head %= BFIN_IPI_MSGQ_LEN;
+		msg_queue->count--;
 	}
-	spin_unlock(&msg_queue->lock);
+	spin_unlock_irqrestore(&msg_queue->lock, flags);
 	return IRQ_HANDLED;
 }
 
@@ -186,48 +189,47 @@ static void ipi_queue_init(void)
 	struct ipi_message_queue *msg_queue;
 	for_each_possible_cpu(cpu) {
 		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		INIT_LIST_HEAD(&msg_queue->head);
 		spin_lock_init(&msg_queue->lock);
 		msg_queue->count = 0;
+		msg_queue->head = 0;
 	}
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+static inline void smp_send_message(cpumask_t callmap, unsigned long type,
+					void (*func) (void *info), void *info, int wait)
 {
 	unsigned int cpu;
-	cpumask_t callmap;
-	unsigned long flags;
 	struct ipi_message_queue *msg_queue;
 	struct ipi_message *msg;
-
-	callmap = cpu_online_map;
-	cpu_clear(smp_processor_id(), callmap);
-	if (cpus_empty(callmap))
-		return 0;
-
-	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&msg->list);
-	msg->call_struct.func = func;
-	msg->call_struct.info = info;
-	msg->call_struct.wait = wait;
-	msg->call_struct.pending = callmap;
-	msg->call_struct.waitmask = callmap;
-	msg->type = BFIN_IPI_CALL_FUNC;
+	unsigned long flags, next_msg;
+	cpumask_t waitmask = callmap; /* waitmask is shared by all cpus */
 
 	for_each_cpu_mask(cpu, callmap) {
 		msg_queue = &per_cpu(ipi_msg_queue, cpu);
 		spin_lock_irqsave(&msg_queue->lock, flags);
-		list_add_tail(&msg->list, &msg_queue->head);
+		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
+			next_msg = (msg_queue->head + msg_queue->count)
+					% BFIN_IPI_MSGQ_LEN;
+			msg = &msg_queue->ipi_message[next_msg];
+			msg->type = type;
+			if (type == BFIN_IPI_CALL_FUNC) {
+				msg->call_struct.func = func;
+				msg->call_struct.info = info;
+				msg->call_struct.wait = wait;
+				msg->call_struct.waitmask = &waitmask;
+			}
+			msg_queue->count++;
+		} else
+			panic("IPI message queue overflow\n");
 		spin_unlock_irqrestore(&msg_queue->lock, flags);
-		platform_send_ipi_cpu(cpu);
+		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
 	}
+
 	if (wait) {
-		while (!cpus_empty(msg->call_struct.waitmask))
+		while (!cpus_empty(waitmask))
 			blackfin_dcache_invalidate_range(
-				(unsigned long)(&msg->call_struct.waitmask),
-				(unsigned long)(&msg->call_struct.waitmask));
+				(unsigned long)(&waitmask),
+				(unsigned long)(&waitmask));
 #ifdef __ARCH_SYNC_CORE_DCACHE
 		/*
 		 * Invalidate D cache in case shared data was changed by
@@ -235,8 +237,20 @@ int smp_call_function(void (*func)(void *info), void *info, int wait)
 		 */
 		resync_core_dcache();
 #endif
-		kfree(msg);
 	}
+}
+
+int smp_call_function(void (*func)(void *info), void *info, int wait)
+{
+	cpumask_t callmap;
+
+	callmap = cpu_online_map;
+	cpu_clear(smp_processor_id(), callmap);
+	if (cpus_empty(callmap))
+		return 0;
+
+	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
+
 	return 0;
 }
 EXPORT_SYMBOL_GPL(smp_call_function);
@@ -246,100 +260,39 @@ int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
 {
 	unsigned int cpu = cpuid;
 	cpumask_t callmap;
-	unsigned long flags;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
 
 	if (cpu_is_offline(cpu))
 		return 0;
 	cpus_clear(callmap);
 	cpu_set(cpu, callmap);
 
-	msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return -ENOMEM;
-	INIT_LIST_HEAD(&msg->list);
-	msg->call_struct.func = func;
-	msg->call_struct.info = info;
-	msg->call_struct.wait = wait;
-	msg->call_struct.pending = callmap;
-	msg->call_struct.waitmask = callmap;
-	msg->type = BFIN_IPI_CALL_FUNC;
-
-	msg_queue = &per_cpu(ipi_msg_queue, cpu);
-	spin_lock_irqsave(&msg_queue->lock, flags);
-	list_add_tail(&msg->list, &msg_queue->head);
-	spin_unlock_irqrestore(&msg_queue->lock, flags);
-	platform_send_ipi_cpu(cpu);
+	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
 
-	if (wait) {
-		while (!cpus_empty(msg->call_struct.waitmask))
-			blackfin_dcache_invalidate_range(
-				(unsigned long)(&msg->call_struct.waitmask),
-				(unsigned long)(&msg->call_struct.waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-		/*
-		 * Invalidate D cache in case shared data was changed by
-		 * other processors to ensure cache coherence.
-		 */
-		resync_core_dcache();
-#endif
-		kfree(msg);
-	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-	unsigned long flags;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
-
+	/* simply trigger an ipi */
 	if (cpu_is_offline(cpu))
 		return;
-
-	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return;
-	INIT_LIST_HEAD(&msg->list);
-	msg->type = BFIN_IPI_RESCHEDULE;
-
-	msg_queue = &per_cpu(ipi_msg_queue, cpu);
-	spin_lock_irqsave(&msg_queue->lock, flags);
-	list_add_tail(&msg->list, &msg_queue->head);
-	spin_unlock_irqrestore(&msg_queue->lock, flags);
-	platform_send_ipi_cpu(cpu);
+	platform_send_ipi_cpu(cpu, IRQ_SUPPLE_0);
 
 	return;
 }
 
 void smp_send_stop(void)
 {
-	unsigned int cpu;
 	cpumask_t callmap;
-	unsigned long flags;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
 
 	callmap = cpu_online_map;
 	cpu_clear(smp_processor_id(), callmap);
 	if (cpus_empty(callmap))
 		return;
 
-	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
-	if (!msg)
-		return;
-	INIT_LIST_HEAD(&msg->list);
-	msg->type = BFIN_IPI_CPU_STOP;
+	smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
 
-	for_each_cpu_mask(cpu, callmap) {
-		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		spin_lock_irqsave(&msg_queue->lock, flags);
-		list_add_tail(&msg->list, &msg_queue->head);
-		spin_unlock_irqrestore(&msg_queue->lock, flags);
-		platform_send_ipi_cpu(cpu);
-	}
 	return;
 }
 
@@ -446,7 +399,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	platform_prepare_cpus(max_cpus);
 	ipi_queue_init();
-	platform_request_ipi(&ipi_handler);
+	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
+	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }
 
 void __init smp_cpus_done(unsigned int max_cpus)
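The core of this change is replacing kmalloc()-allocated, list-chained IPI messages with a fixed-length per-CPU ring buffer indexed by head and count, so neither the send nor the receive path ever allocates memory. The standalone sketch below shows the same index arithmetic used by smp_send_message() and ipi_handler_int1(); it is ordinary userspace C, not kernel code, and the queue length, names, and demo main() are illustrative assumptions.

/*
 * Minimal sketch of the fixed-length FIFO used by ipi_message_queue:
 * entries are appended at (head + count) % LEN and consumed from head,
 * so no allocation is needed and no pointers are chained.
 */
#include <stdio.h>
#include <stdlib.h>

#define MSGQ_LEN 5			/* mirrors BFIN_IPI_MSGQ_LEN */

struct msg { unsigned long type; };

struct msg_queue {
	unsigned long head;		/* index of the oldest queued message */
	unsigned long count;		/* number of queued messages */
	struct msg ring[MSGQ_LEN];
};

/* Enqueue at the tail; a full queue is fatal, like the kernel's panic(). */
static void enqueue(struct msg_queue *q, unsigned long type)
{
	unsigned long next;

	if (q->count >= MSGQ_LEN) {
		fprintf(stderr, "message queue overflow\n");
		exit(1);
	}
	next = (q->head + q->count) % MSGQ_LEN;
	q->ring[next].type = type;
	q->count++;
}

/* Drain from the head, oldest first, as ipi_handler_int1() does. */
static void drain(struct msg_queue *q)
{
	while (q->count) {
		printf("handling message type %lu\n", q->ring[q->head].type);
		q->head = (q->head + 1) % MSGQ_LEN;
		q->count--;
	}
}

int main(void)
{
	struct msg_queue q = { 0 };

	enqueue(&q, 1);		/* e.g. BFIN_IPI_CALL_FUNC */
	enqueue(&q, 2);		/* e.g. BFIN_IPI_CPU_STOP */
	drain(&q);
	return 0;
}

Appending at (head + count) % LEN and consuming at head keeps the buffer a strict FIFO, which is why the patch can simply panic on overflow instead of handling a failed allocation in interrupt context.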
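The other mechanism worth noting is the wait/acknowledge handshake: smp_send_message() keeps a single waitmask on the caller's stack, each target CPU clears its own bit in ipi_call_function(), and the caller spins (invalidating its cached copy of the mask) until the mask is empty. The sketch below imitates that handshake with C11 atomics and pthreads; the thread setup, bit layout, and ipi_work() helper are assumptions made for the demo, and the D-cache invalidation in the real code has no userspace analogue.

/*
 * Hedged userspace sketch of the shared-waitmask handshake: the sender
 * builds a bitmask of target "cpus", each worker clears its bit after the
 * cross-call runs, and the sender spins until the mask reaches zero.
 * Compile with: cc -pthread waitmask_demo.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_ulong waitmask;		/* stands in for the on-stack cpumask_t */

static void ipi_work(int cpu)
{
	printf("cpu %d ran the cross-call\n", cpu);
}

static void *ipi_receiver(void *arg)
{
	int cpu = (int)(long)arg;

	ipi_work(cpu);
	/* ack: clear our bit, like cpu_clear(cpu, *waitmask) in the handler */
	atomic_fetch_and(&waitmask, ~(1UL << cpu));
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_CPUS];
	int cpu;

	/* "send" to cpus 1..3; cpu 0 is the caller and is left out */
	atomic_store(&waitmask, 0xeUL);
	for (cpu = 1; cpu < NR_CPUS; cpu++)
		pthread_create(&tid[cpu], NULL, ipi_receiver, (void *)(long)cpu);

	while (atomic_load(&waitmask))	/* smp_send_message()'s wait loop */
		;
	printf("all targets acknowledged\n");

	for (cpu = 1; cpu < NR_CPUS; cpu++)
		pthread_join(tid[cpu], NULL);
	return 0;
}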