-rw-r--r--  arch/blackfin/Kconfig            |   1
-rw-r--r--  arch/blackfin/include/asm/smp.h  |   2
-rw-r--r--  arch/blackfin/mach-common/smp.c  | 223
3 files changed, 69 insertions(+), 157 deletions(-)
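In short, this patch retires Blackfin's hand-rolled IPI transport: the locked per-CPU FIFO of struct ipi_message and the arch-private smp_call_function()/smp_call_function_single() go away, the arch selects USE_GENERIC_SMP_HELPERS, and pending IPIs become bits in a per-CPU word, set with set_bit() on the sending side and drained lock-free with xchg()/find_next_bit() in the handler. Below is a minimal user-space sketch of that send/drain pattern; the toy_* names and the GCC __atomic builtins (standing in for the kernel's set_bit() and xchg()) are illustrative assumptions, not code from the patch.

    #include <stdio.h>

    enum ipi_message_type {
            BFIN_IPI_TIMER,
            BFIN_IPI_RESCHEDULE,
            BFIN_IPI_CALL_FUNC,
            BFIN_IPI_CALL_FUNC_SINGLE,
            BFIN_IPI_CPU_STOP,
    };

    static unsigned long pending_bits;  /* stands in for per-CPU bfin_ipi.bits */

    static void toy_send_ipi(enum ipi_message_type msg)
    {
            /* set_bit() in the patch; the kernel then raises IRQ_SUPPLE_1 */
            __atomic_fetch_or(&pending_bits, 1UL << msg, __ATOMIC_RELEASE);
    }

    static void toy_ipi_handler(void)
    {
            unsigned long pending;

            /* xchg() in the patch: grab and clear all pending bits at once */
            while ((pending = __atomic_exchange_n(&pending_bits, 0,
                                                  __ATOMIC_ACQUIRE)) != 0) {
                    while (pending) {
                            int msg = __builtin_ctzl(pending); /* lowest set bit */

                            pending &= pending - 1;            /* clear it */
                            printf("servicing IPI type %d\n", msg);
                    }
            }
    }

    int main(void)
    {
            toy_send_ipi(BFIN_IPI_RESCHEDULE);
            toy_send_ipi(BFIN_IPI_CALL_FUNC);
            toy_ipi_handler();  /* prints types 1 and 2 */
            return 0;
    }

The win over the FIFO is that a sender never blocks on a queue lock and the same message type posted twice before the handler runs simply coalesces into one set bit, so the old "IPI message queue overflow" panic path disappears entirely.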
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f34861920634..c7092e6057c5 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -38,6 +38,7 @@ config BLACKFIN
 	select GENERIC_ATOMIC64
 	select GENERIC_IRQ_PROBE
 	select IRQ_PER_CPU if SMP
+	select USE_GENERIC_SMP_HELPERS if SMP
 	select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
 	select GENERIC_SMP_IDLE_THREAD
 	select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index dc3d144b4bb5..9631598dcc5d 100644
--- a/arch/blackfin/include/asm/smp.h
+++ b/arch/blackfin/include/asm/smp.h
@@ -18,6 +18,8 @@
 #define raw_smp_processor_id()	blackfin_core_id()
 
 extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
 asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
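These two declarations are the hooks that USE_GENERIC_SMP_HELPERS expects from an architecture: kernel/smp.c queues the call data on the target CPU's per-CPU queue, invokes the arch hook to raise an IPI, and the arch's IPI handler calls back into the generic dispatchers, as the smp.c changes below do. Here is a toy single-threaded model of that contract; every toy_* name is invented for illustration, the queue is reduced to a single slot, and the generic side is a paraphrase of how the helpers of that era worked, not quoted kernel code.

    #include <stdio.h>

    /* one queued cross-call; the real generic code keeps a per-CPU queue */
    struct toy_csd {
            void (*func)(void *info);
            void *info;
    };

    static struct toy_csd toy_queue;

    /* models generic_smp_call_function_single_interrupt(): drain the queue */
    static void toy_generic_single_interrupt(void)
    {
            toy_queue.func(toy_queue.info);
    }

    /* models the arch hook declared above; on Blackfin it ends up doing
     * send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE).  Here the
     * "IPI" is delivered by a direct call. */
    static void toy_arch_send_call_function_single_ipi(int cpu)
    {
            (void)cpu;
            toy_generic_single_interrupt();
    }

    /* models the generic smp_call_function_single(): queue, then kick */
    static int toy_smp_call_function_single(int cpu, void (*func)(void *),
                                            void *info)
    {
            toy_queue.func = func;
            toy_queue.info = info;
            toy_arch_send_call_function_single_ipi(cpu);
            return 0;
    }

    static void greet(void *info)
    {
            printf("cross-call ran with arg \"%s\"\n", (const char *)info);
    }

    int main(void)
    {
            return toy_smp_call_function_single(1, greet, "hello");
    }

The division of labor is the point: generic code owns the queueing and the wait/nowait semantics, the arch owns nothing but delivery, which is why the whole smp_call_struct/waitmask machinery below can be deleted.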
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 00bbe672b3b3..a40151306b77 100644
--- a/arch/blackfin/mach-common/smp.c
+++ b/arch/blackfin/mach-common/smp.c
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
 
 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
 
-#define BFIN_IPI_TIMER	      0
-#define BFIN_IPI_RESCHEDULE   1
-#define BFIN_IPI_CALL_FUNC    2
-#define BFIN_IPI_CPU_STOP     3
+enum ipi_message_type {
+	BFIN_IPI_TIMER,
+	BFIN_IPI_RESCHEDULE,
+	BFIN_IPI_CALL_FUNC,
+	BFIN_IPI_CALL_FUNC_SINGLE,
+	BFIN_IPI_CPU_STOP,
+};
 
 struct blackfin_flush_data {
 	unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
 
 void *secondary_stack;
 
-
-struct smp_call_struct {
-	void (*func)(void *info);
-	void *info;
-	int wait;
-	cpumask_t *waitmask;
-};
-
 static struct blackfin_flush_data smp_flush_data;
 
 static DEFINE_SPINLOCK(stop_lock);
 
-struct ipi_message {
-	unsigned long type;
-	struct smp_call_struct call_struct;
-};
-
 /* A magic number - stress test shows this is safe for common cases */
 #define BFIN_IPI_MSGQ_LEN 5
 
 /* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
-	spinlock_t lock;
+struct ipi_data {
 	unsigned long count;
-	unsigned long head; /* head of the queue */
-	struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+	unsigned long bits;
 };
 
-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
 
 static void ipi_cpu_stop(unsigned int cpu)
 {
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
 	blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
-	int wait;
-	void (*func)(void *info);
-	void *info;
-	func = msg->call_struct.func;
-	info = msg->call_struct.info;
-	wait = msg->call_struct.wait;
-	func(info);
-	if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
-		/*
-		 * 'wait' usually means synchronization between CPUs.
-		 * Invalidate D cache in case shared data was changed
-		 * by func() to ensure cache coherence.
-		 */
-		resync_core_dcache();
-#endif
-		cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
-	}
-}
-
 /* Use IRQ_SUPPLE_0 to request reschedule.
  * When returning from interrupt to user space,
  * there is chance to reschedule */
@@ -172,152 +138,95 @@ void ipi_timer(void)
 
 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
-	struct ipi_message *msg;
-	struct ipi_message_queue *msg_queue;
+	struct ipi_data *bfin_ipi_data;
 	unsigned int cpu = smp_processor_id();
-	unsigned long flags;
+	unsigned long pending;
+	unsigned long msg;
 
 	platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
-	msg_queue = &__get_cpu_var(ipi_msg_queue);
+	bfin_ipi_data = &__get_cpu_var(bfin_ipi);
 
-	spin_lock_irqsave(&msg_queue->lock, flags);
-
-	while (msg_queue->count) {
-		msg = &msg_queue->ipi_message[msg_queue->head];
-		switch (msg->type) {
-		case BFIN_IPI_TIMER:
-			ipi_timer();
-			break;
-		case BFIN_IPI_RESCHEDULE:
-			scheduler_ipi();
-			break;
-		case BFIN_IPI_CALL_FUNC:
-			ipi_call_function(cpu, msg);
-			break;
-		case BFIN_IPI_CPU_STOP:
-			ipi_cpu_stop(cpu);
-			break;
-		default:
-			printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
-			       cpu, msg->type);
-			break;
-		}
-		msg_queue->head++;
-		msg_queue->head %= BFIN_IPI_MSGQ_LEN;
-		msg_queue->count--;
+	while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+		msg = 0;
+		do {
+			msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+			switch (msg) {
+			case BFIN_IPI_TIMER:
+				ipi_timer();
+				break;
+			case BFIN_IPI_RESCHEDULE:
+				scheduler_ipi();
+				break;
+			case BFIN_IPI_CALL_FUNC:
+				generic_smp_call_function_interrupt();
+				break;
+
+			case BFIN_IPI_CALL_FUNC_SINGLE:
+				generic_smp_call_function_single_interrupt();
+				break;
+
+			case BFIN_IPI_CPU_STOP:
+				ipi_cpu_stop(cpu);
+				break;
+			}
+		} while (msg < BITS_PER_LONG);
+
+		smp_mb();
 	}
-	spin_unlock_irqrestore(&msg_queue->lock, flags);
 	return IRQ_HANDLED;
 }
 
-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
 {
 	unsigned int cpu;
-	struct ipi_message_queue *msg_queue;
+	struct ipi_data *bfin_ipi_data;
 	for_each_possible_cpu(cpu) {
-		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		spin_lock_init(&msg_queue->lock);
-		msg_queue->count = 0;
-		msg_queue->head = 0;
+		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+		bfin_ipi_data->bits = 0;
+		bfin_ipi_data->count = 0;
 	}
 }
 
-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
-				void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 {
 	unsigned int cpu;
-	struct ipi_message_queue *msg_queue;
-	struct ipi_message *msg;
-	unsigned long flags, next_msg;
-	cpumask_t waitmask; /* waitmask is shared by all cpus */
-
-	cpumask_copy(&waitmask, &callmap);
-	for_each_cpu(cpu, &callmap) {
-		msg_queue = &per_cpu(ipi_msg_queue, cpu);
-		spin_lock_irqsave(&msg_queue->lock, flags);
-		if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
-			next_msg = (msg_queue->head + msg_queue->count)
-					% BFIN_IPI_MSGQ_LEN;
-			msg = &msg_queue->ipi_message[next_msg];
-			msg->type = type;
-			if (type == BFIN_IPI_CALL_FUNC) {
-				msg->call_struct.func = func;
-				msg->call_struct.info = info;
-				msg->call_struct.wait = wait;
-				msg->call_struct.waitmask = &waitmask;
-			}
-			msg_queue->count++;
-		} else
-			panic("IPI message queue overflow\n");
-		spin_unlock_irqrestore(&msg_queue->lock, flags);
+	struct ipi_data *bfin_ipi_data;
+	unsigned long flags;
+
+	local_irq_save(flags);
+
+	for_each_cpu(cpu, cpumask) {
+		bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+		smp_mb();
+		set_bit(msg, &bfin_ipi_data->bits);
+		bfin_ipi_data->count++;
 		platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
 	}
 
-	if (wait) {
-		while (!cpumask_empty(&waitmask))
-			blackfin_dcache_invalidate_range(
-				(unsigned long)(&waitmask),
-				(unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-		/*
-		 * Invalidate D cache in case shared data was changed by
-		 * other processors to ensure cache coherence.
-		 */
-		resync_core_dcache();
-#endif
-	}
+	local_irq_restore(flags);
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-	cpumask_t callmap;
-
-	preempt_disable();
-	cpumask_copy(&callmap, cpu_online_mask);
-	cpumask_clear_cpu(smp_processor_id(), &callmap);
-	if (!cpumask_empty(&callmap))
-		smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-	preempt_enable();
-
-	return 0;
+	send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-				int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-	unsigned int cpu = cpuid;
-	cpumask_t callmap;
-
-	if (cpu_is_offline(cpu))
-		return 0;
-	cpumask_clear(&callmap);
-	cpumask_set_cpu(cpu, &callmap);
-
-	smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-	return 0;
+	send_ipi(mask, BFIN_IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-	cpumask_t callmap;
-	/* simply trigger an ipi */
-
-	cpumask_clear(&callmap);
-	cpumask_set_cpu(cpu, &callmap);
-
-	smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+	send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
 
 	return;
 }
 
 void smp_send_msg(const struct cpumask *mask, unsigned long type)
 {
-	smp_send_message(*mask, type, NULL, NULL, 0);
+	send_ipi(mask, type);
 }
 
 void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
 	cpumask_copy(&callmap, cpu_online_mask);
 	cpumask_clear_cpu(smp_processor_id(), &callmap);
 	if (!cpumask_empty(&callmap))
-		smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+		send_ipi(&callmap, BFIN_IPI_CPU_STOP);
 
 	preempt_enable();
 
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
 	platform_prepare_cpus(max_cpus);
-	ipi_queue_init();
+	bfin_ipi_init();
 	platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
 	platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }
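One quirk of the new drain loop is worth flagging: msg starts at 0 and each find_next_bit() call scans from msg + 1, so bit 0 — BFIN_IPI_TIMER, the first enum value — can never be reported, and since the switch has no default, a scan result of BITS_PER_LONG simply terminates the do/while. A small user-space reproduction follows, with a simplified single-word stand-in for the kernel's find_next_bit() (a re-implementation written for this note, not the kernel's).

    #include <stdio.h>

    #define BITS_PER_LONG (8 * sizeof(long))

    /* minimal stand-in for the kernel's find_next_bit() on a single word:
     * returns the index of the first set bit at or after 'offset', or
     * 'size' if none is found */
    static unsigned long find_next_bit(const unsigned long *addr,
                                       unsigned long size, unsigned long offset)
    {
            unsigned long i;

            for (i = offset; i < size; i++)
                    if (*addr & (1UL << i))
                            return i;
            return size;
    }

    int main(void)
    {
            unsigned long pending = 1UL << 0 | 1UL << 2; /* TIMER and CALL_FUNC */
            unsigned long msg = 0;

            do {
                    /* scanning starts at msg + 1, so bit 0 is never seen */
                    msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
                    if (msg < BITS_PER_LONG)
                            printf("serviced bit %lu\n", msg);
            } while (msg < BITS_PER_LONG);

            return 0; /* prints only "serviced bit 2"; bit 0 was skipped */
    }

If this reading is right, timer IPIs posted through smp_timer_broadcast() would be silently dropped by this version; later revisions of this file appear to guard against exactly that by reserving bit 0 with a BFIN_IPI_NONE placeholder at the head of the enum.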
