| field     | value                                    | date                      |
|-----------|------------------------------------------|---------------------------|
| author    | Bob Breuer <breuerr@mc.net>              | 2006-03-24 01:36:19 -0500 |
| committer | David S. Miller <davem@davemloft.net>    | 2006-03-24 01:36:19 -0500 |
| commit    | a54123e27779049d27d21e6c8adfee73aa2c0734 |                           |
| tree      | 265849e706e4ebe3b75127ebe6e3cbfe2a78850a |                           |
| parent    | 674a396c6d2ba0341ebdd7c1c9950f32f018e2dd |                           |
[SPARC]: Try to start getting SMP back into shape.
Todo items:
- IRQ_INPROGRESS flag - use sparc64 irq buckets, or generic irq_desc?
- sun4d
- re-indent large chunks of sun4m_smp.c
- some places assume sequential cpu numbering (i.e. 0,1 instead of 0,2)
Last I checked (with 2.6.14), random programs segfault with dual
HyperSPARC. And with SuperSPARC II's, it seems stable but will
eventually die from a write lock error (wrong lock owner or something).
I haven't tried the HyperSPARC + highmem combination recently, so that
may still be a problem.
Signed-off-by: David S. Miller <davem@davemloft.net>
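For orientation before the diffs: the interim answer to the IRQ_INPROGRESS todo item above is a small per-IRQ bucket added in arch/sparc/kernel/irq.c. What follows is a condensed sketch of that pattern, not a drop-in file; NR_IRQS is assumed here to be the sparc32 value of 16, and the handler-chain details are elided:

```c
/* Condensed from the irq.c changes below: one bucket per IRQ level,
 * with a flag that marks the handler chain as currently executing. */
#define NR_IRQS 16			/* sparc32 PIL levels, assumed */
#define SPARC_IRQ_INPROGRESS 1

struct irqaction;			/* handler chain node, elided */

static struct {
	struct irqaction *action;	/* head of the shared-handler chain */
	int flags;			/* SPARC_IRQ_INPROGRESS while running */
} sparc_irq[NR_IRQS];

/* synchronize_irq() can then spin until the flag clears, replacing
 * the old "implement me" BUG() stub: */
static void synchronize_irq_sketch(unsigned int irq)
{
	unsigned int cpu_irq = irq & (NR_IRQS - 1);

	while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
		;	/* the real code calls cpu_relax() here */
}
```

handler_irq() sets the flag before walking the chain and clears it afterwards, which is what makes the spin-wait above meaningful on the other CPU.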
| mode       | file                            | changes |
|------------|---------------------------------|---------|
| -rw-r--r-- | arch/sparc/Kconfig              | 1       |
| -rw-r--r-- | arch/sparc/kernel/irq.c         | 66      |
| -rw-r--r-- | arch/sparc/kernel/smp.c         | 84      |
| -rw-r--r-- | arch/sparc/kernel/sparc_ksyms.c | 4       |
| -rw-r--r-- | arch/sparc/kernel/sun4d_irq.c   | 2       |
| -rw-r--r-- | arch/sparc/kernel/sun4d_smp.c   | 8       |
| -rw-r--r-- | arch/sparc/kernel/sun4m_smp.c   | 181     |
| -rw-r--r-- | arch/sparc/mm/srmmu.c           | 6       |
| -rw-r--r-- | include/asm-sparc/cpudata.h     | 1       |
| -rw-r--r-- | include/asm-sparc/smp.h         | 9       |
| -rw-r--r-- | include/asm-sparc/spinlock.h    | 25      |

11 files changed, 209 insertions, 178 deletions
```diff
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index f944b58cdfe7..7c58fc1a39c4 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -23,7 +23,6 @@ menu "General machine setup"
 
 config SMP
 	bool "Symmetric multi-processing support (does not work on sun4/sun4c)"
-	depends on BROKEN
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, say N. If you have a system with more
```
```diff
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 4c60a6ef54a9..aac8af5aae51 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -154,9 +154,11 @@ void (*sparc_init_timers)(irqreturn_t (*)(int, void *,struct pt_regs *)) =
 struct irqaction static_irqaction[MAX_STATIC_ALLOC];
 int static_irq_count;
 
-struct irqaction *irq_action[NR_IRQS] = {
-	[0 ... (NR_IRQS-1)] = NULL
-};
+struct {
+	struct irqaction *action;
+	int flags;
+} sparc_irq[NR_IRQS];
+#define SPARC_IRQ_INPROGRESS 1
 
 /* Used to protect the IRQ action lists */
 DEFINE_SPINLOCK(irq_action_lock);
@@ -177,7 +179,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 	spin_lock_irqsave(&irq_action_lock, flags);
 	if (i < NR_IRQS) {
-		action = *(i + irq_action);
+		action = sparc_irq[i].action;
 		if (!action)
 			goto out_unlock;
 		seq_printf(p, "%3d: ", i);
@@ -186,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v)
 #else
 		for_each_online_cpu(j) {
 			seq_printf(p, "%10u ",
-			    kstat_cpu(cpu_logical_map(j)).irqs[i]);
+			    kstat_cpu(j).irqs[i]);
 		}
 #endif
 		seq_printf(p, " %c %s",
@@ -207,7 +209,7 @@ out_unlock:
 void free_irq(unsigned int irq, void *dev_id)
 {
 	struct irqaction * action;
-	struct irqaction * tmp = NULL;
+	struct irqaction **actionp;
 	unsigned long flags;
 	unsigned int cpu_irq;
 
@@ -225,7 +227,8 @@ void free_irq(unsigned int irq, void *dev_id)
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
-	action = *(cpu_irq + irq_action);
+	actionp = &sparc_irq[cpu_irq].action;
+	action = *actionp;
 
 	if (!action->handler) {
 		printk("Trying to free free IRQ%d\n",irq);
@@ -235,7 +238,7 @@ void free_irq(unsigned int irq, void *dev_id)
 		for (; action; action = action->next) {
 			if (action->dev_id == dev_id)
 				break;
-			tmp = action;
+			actionp = &action->next;
 		}
 		if (!action) {
 			printk("Trying to free free shared IRQ%d\n",irq);
@@ -254,11 +257,8 @@ void free_irq(unsigned int irq, void *dev_id)
 		       irq, action->name);
 		goto out_unlock;
 	}
 
-	if (action && tmp)
-		tmp->next = action->next;
-	else
-		*(cpu_irq + irq_action) = action->next;
+	*actionp = action->next;
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
 
@@ -268,7 +268,7 @@ void free_irq(unsigned int irq, void *dev_id)
 
 	kfree(action);
 
-	if (!(*(cpu_irq + irq_action)))
+	if (!sparc_irq[cpu_irq].action)
 		disable_irq(irq);
 
 out_unlock:
@@ -287,8 +287,11 @@ EXPORT_SYMBOL(free_irq);
 #ifdef CONFIG_SMP
 void synchronize_irq(unsigned int irq)
 {
-	printk("synchronize_irq says: implement me!\n");
-	BUG();
+	unsigned int cpu_irq;
+
+	cpu_irq = irq & (NR_IRQS - 1);
+	while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
+		cpu_relax();
 }
 #endif /* SMP */
 
@@ -299,7 +302,7 @@ void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
 	unsigned int cpu_irq;
 
 	cpu_irq = irq & (NR_IRQS - 1);
-	action = *(cpu_irq + irq_action);
+	action = sparc_irq[cpu_irq].action;
 
 	printk("IO device interrupt, irq = %d\n", irq);
 	printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
@@ -330,7 +333,8 @@ void handler_irq(int irq, struct pt_regs * regs)
 	if(irq < 10)
 		smp4m_irq_rotate(cpu);
 #endif
-	action = *(irq + irq_action);
+	action = sparc_irq[irq].action;
+	sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
 	kstat_cpu(cpu).irqs[irq]++;
 	do {
 		if (!action || !action->handler)
@@ -338,6 +342,7 @@ void handler_irq(int irq, struct pt_regs * regs)
 		action->handler(irq, action->dev_id, regs);
 		action = action->next;
 	} while (action);
+	sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
 	enable_pil_irq(irq);
 	irq_exit();
 }
@@ -389,7 +394,7 @@ int request_fast_irq(unsigned int irq,
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
-	action = *(cpu_irq + irq_action);
+	action = sparc_irq[cpu_irq].action;
 	if(action) {
 		if(action->flags & SA_SHIRQ)
 			panic("Trying to register fast irq when already shared.\n");
@@ -452,7 +457,7 @@ int request_fast_irq(unsigned int irq,
 	action->dev_id = NULL;
 	action->next = NULL;
 
-	*(cpu_irq + irq_action) = action;
+	sparc_irq[cpu_irq].action = action;
 
 	enable_irq(irq);
 
@@ -467,7 +472,7 @@ int request_irq(unsigned int irq,
 	   irqreturn_t (*handler)(int, void *, struct pt_regs *),
 	   unsigned long irqflags, const char * devname, void *dev_id)
 {
-	struct irqaction * action, *tmp = NULL;
+	struct irqaction * action, **actionp;
 	unsigned long flags;
 	unsigned int cpu_irq;
 	int ret;
@@ -490,20 +495,20 @@ int request_irq(unsigned int irq,
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
-	action = *(cpu_irq + irq_action);
+	actionp = &sparc_irq[cpu_irq].action;
+	action = *actionp;
 	if (action) {
-		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
-			for (tmp = action; tmp->next; tmp = tmp->next);
-		} else {
+		if (!(action->flags & SA_SHIRQ) || !(irqflags & SA_SHIRQ)) {
 			ret = -EBUSY;
 			goto out_unlock;
 		}
-		if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
+		if ((action->flags & SA_INTERRUPT) != (irqflags & SA_INTERRUPT)) {
 			printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
 			ret = -EBUSY;
 			goto out_unlock;
 		}
-		action = NULL;		/* Or else! */
+		for ( ; action; action = *actionp)
+			actionp = &action->next;
 	}
 
 	/* If this is flagged as statically allocated then we use our
@@ -532,10 +537,7 @@ int request_irq(unsigned int irq,
 	action->next = NULL;
 	action->dev_id = dev_id;
 
-	if (tmp)
-		tmp->next = action;
-	else
-		*(cpu_irq + irq_action) = action;
+	*actionp = action;
 
 	enable_irq(irq);
 
```
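A note on the free_irq()/request_irq() rewrite above: the old code tracked a `tmp` predecessor and needed an if/else to distinguish unlinking the list head from an interior node, while the new `actionp` pointer-to-a-pointer walk collapses both cases into a single `*actionp = action->next` store. A minimal user-space sketch of the same idiom, with hypothetical `node`/`dev_id` names:

```c
#include <stdlib.h>

struct node {
	void *dev_id;
	struct node *next;
};

/* Unlink and free the entry matching dev_id. *headp plays the role
 * of sparc_irq[cpu_irq].action in the patch: because nodep always
 * points at the link that reaches the current node, head removal
 * and interior removal are the same assignment. */
static void unlink_dev(struct node **headp, void *dev_id)
{
	struct node **nodep = headp;
	struct node *n;

	while ((n = *nodep) != NULL) {
		if (n->dev_id == dev_id) {
			*nodep = n->next;
			free(n);
			return;
		}
		nodep = &n->next;
	}
}
```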
```diff
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index ea5682ce7031..2be812115197 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -45,6 +45,7 @@ volatile int __cpu_logical_map[NR_CPUS];
 
 cpumask_t cpu_online_map = CPU_MASK_NONE;
 cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
+cpumask_t smp_commenced_mask = CPU_MASK_NONE;
 
 /* The only guaranteed locking primitive available on all Sparc
  * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically
@@ -57,11 +58,6 @@ cpumask_t phys_cpu_present_map = CPU_MASK_NONE;
 /* Used to make bitops atomic */
 unsigned char bitops_spinlock = 0;
 
-volatile unsigned long ipi_count;
-
-volatile int smp_process_available=0;
-volatile int smp_commenced = 0;
-
 void __init smp_store_cpu_info(int id)
 {
 	int cpu_node;
@@ -79,6 +75,22 @@ void __init smp_store_cpu_info(int id)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
+	extern void smp4m_smp_done(void);
+	unsigned long bogosum = 0;
+	int cpu, num;
+
+	for (cpu = 0, num = 0; cpu < NR_CPUS; cpu++)
+		if (cpu_online(cpu)) {
+			num++;
+			bogosum += cpu_data(cpu).udelay_val;
+		}
+
+	printk("Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
+		num, bogosum/(500000/HZ),
+		(bogosum/(5000/HZ))%100);
+
+	BUG_ON(sparc_cpu_model != sun4m);
+	smp4m_smp_done();
 }
 
 void cpu_panic(void)
@@ -89,17 +101,6 @@ void cpu_panic(void)
 
 struct linux_prom_registers smp_penguin_ctable __initdata = { 0 };
 
-void __init smp_boot_cpus(void)
-{
-	extern void smp4m_boot_cpus(void);
-	extern void smp4d_boot_cpus(void);
-
-	if (sparc_cpu_model == sun4m)
-		smp4m_boot_cpus();
-	else
-		smp4d_boot_cpus();
-}
-
 void smp_send_reschedule(int cpu)
 {
 	/* See sparc64 */
@@ -252,20 +253,61 @@ int setup_profiling_timer(unsigned int multiplier)
 	return 0;
 }
 
-void __init smp_prepare_cpus(unsigned int maxcpus)
+void __init smp_prepare_cpus(unsigned int max_cpus)
 {
+	extern void smp4m_boot_cpus(void);
+	int i, cpuid, ncpus, extra;
+
+	BUG_ON(sparc_cpu_model != sun4m);
+	printk("Entering SMP Mode...\n");
+
+	ncpus = 1;
+	extra = 0;
+	for (i = 0; !cpu_find_by_instance(i, NULL, &cpuid); i++) {
+		if (cpuid == boot_cpu_id)
+			continue;
+		if (cpuid < NR_CPUS && ncpus++ < max_cpus)
+			cpu_set(cpuid, phys_cpu_present_map);
+		else
+			extra++;
+	}
+	if (max_cpus >= NR_CPUS && extra)
+		printk("Warning: NR_CPUS is too low to start all cpus\n");
+
+	smp_store_cpu_info(boot_cpu_id);
+
+	smp4m_boot_cpus();
 }
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-	current_thread_info()->cpu = hard_smp_processor_id();
-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), phys_cpu_present_map);
+	int cpuid = hard_smp_processor_id();
+
+	if (cpuid >= NR_CPUS) {
+		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
+		prom_halt();
+	}
+	if (cpuid != 0)
+		printk("boot cpu id != 0, this could work but is untested\n");
+
+	current_thread_info()->cpu = cpuid;
+	cpu_set(cpuid, cpu_online_map);
+	cpu_set(cpuid, phys_cpu_present_map);
 }
 
 int __devinit __cpu_up(unsigned int cpu)
 {
-	panic("smp doesn't work\n");
+	extern int smp4m_boot_one_cpu(int);
+	int ret;
+
+	ret = smp4m_boot_one_cpu(cpu);
+
+	if (!ret) {
+		cpu_set(cpu, smp_commenced_mask);
+		while (!cpu_online(cpu))
+			mb();
+	}
+	return ret;
 }
 
 void smp_bogo(struct seq_file *m)
```
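The new __cpu_up() and the callin paths form a two-sided handshake: the master sets the secondary's bit in smp_commenced_mask and spins until the secondary appears in cpu_online_map, while the secondary spins on the commenced bit and only then marks itself online. A rough user-space sketch of that ordering, with C11 atomics standing in for the kernel's cpumask operations (names here are illustrative, not the kernel API):

```c
#include <stdatomic.h>

static _Atomic unsigned long commenced_mask;	/* ~ smp_commenced_mask */
static _Atomic unsigned long online_mask;	/* ~ cpu_online_map */

/* Master side (__cpu_up): release the secondary, then wait for it. */
static int cpu_up_sketch(int cpu)
{
	atomic_fetch_or(&commenced_mask, 1UL << cpu);
	while (!(atomic_load(&online_mask) & (1UL << cpu)))
		;	/* the patch spins with mb() here */
	return 0;
}

/* Secondary side (smp4m_callin): wait for release, then go online. */
static void callin_sketch(int cpu)
{
	while (!(atomic_load(&commenced_mask) & (1UL << cpu)))
		;	/* the patch spins with mb()/barrier() here */
	atomic_fetch_or(&online_mask, 1UL << cpu);
}
```

Replacing the old global `smp_commenced` flag with a per-CPU mask is what lets __cpu_up() bring secondaries online one at a time.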
```diff
diff --git a/arch/sparc/kernel/sparc_ksyms.c b/arch/sparc/kernel/sparc_ksyms.c
index 19b25399d7e4..2c21d7907635 100644
--- a/arch/sparc/kernel/sparc_ksyms.c
+++ b/arch/sparc/kernel/sparc_ksyms.c
@@ -136,10 +136,6 @@ EXPORT_PER_CPU_SYMBOL(__cpu_data);
 /* IRQ implementation. */
 EXPORT_SYMBOL(synchronize_irq);
 
-/* Misc SMP information */
-EXPORT_SYMBOL(__cpu_number_map);
-EXPORT_SYMBOL(__cpu_logical_map);
-
 /* CPU online map and active count. */
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(phys_cpu_present_map);
```
```diff
diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c
index cea7fc6fc6e5..ca656d9bd6fd 100644
--- a/arch/sparc/kernel/sun4d_irq.c
+++ b/arch/sparc/kernel/sun4d_irq.c
@@ -54,7 +54,7 @@ unsigned char cpu_leds[32];
 unsigned char sbus_tid[32];
 #endif
 
-extern struct irqaction *irq_action[];
+static struct irqaction *irq_action[NR_IRQS];
 extern spinlock_t irq_action_lock;
 
 struct sbus_action {
```
```diff
diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c
index 41bb9596be48..b141b7ee6717 100644
--- a/arch/sparc/kernel/sun4d_smp.c
+++ b/arch/sparc/kernel/sun4d_smp.c
@@ -46,14 +46,16 @@ extern volatile int smp_processors_ready;
 extern int smp_num_cpus;
 static int smp_highest_cpu;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
-extern struct cpuinfo_sparc cpu_data[NR_CPUS];
+extern cpuinfo_sparc cpu_data[NR_CPUS];
 extern unsigned char boot_cpu_id;
 extern int smp_activated;
 extern volatile int __cpu_number_map[NR_CPUS];
 extern volatile int __cpu_logical_map[NR_CPUS];
 extern volatile unsigned long ipi_count;
 extern volatile int smp_process_available;
-extern volatile int smp_commenced;
+
+extern cpumask_t smp_commenced_mask;
+
 extern int __smp4d_processor_id(void);
 
 /* #define SMP_DEBUG */
@@ -136,7 +138,7 @@ void __init smp4d_callin(void)
 
 	local_irq_enable();	/* We don't allow PIL 14 yet */
 
-	while(!smp_commenced)
+	while (!cpu_isset(cpuid, smp_commenced_mask))
 		barrier();
 
 	spin_lock_irqsave(&sun4d_imsk_lock, flags);
```
```diff
diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c
index 1dde312eebda..70b375a4c2c2 100644
--- a/arch/sparc/kernel/sun4m_smp.c
+++ b/arch/sparc/kernel/sun4m_smp.c
@@ -40,15 +40,11 @@ extern ctxd_t *srmmu_ctx_table_phys;
 extern void calibrate_delay(void);
 
 extern volatile int smp_processors_ready;
-extern int smp_num_cpus;
 extern volatile unsigned long cpu_callin_map[NR_CPUS];
 extern unsigned char boot_cpu_id;
-extern int smp_activated;
-extern volatile int __cpu_number_map[NR_CPUS];
-extern volatile int __cpu_logical_map[NR_CPUS];
-extern volatile unsigned long ipi_count;
-extern volatile int smp_process_available;
-extern volatile int smp_commenced;
+
+extern cpumask_t smp_commenced_mask;
+
 extern int __smp4m_processor_id(void);
 
 /*#define SMP_DEBUG*/
@@ -77,8 +73,6 @@ void __init smp4m_callin(void)
 	local_flush_cache_all();
 	local_flush_tlb_all();
 
-	set_irq_udt(boot_cpu_id);
-
 	/* Get our local ticker going. */
 	smp_setup_percpu_timer();
 
@@ -95,8 +89,9 @@ void __init smp4m_callin(void)
 	 * to call the scheduler code.
 	 */
 	/* Allow master to continue. */
-	swap((unsigned long *)&cpu_callin_map[cpuid], 1);
+	swap(&cpu_callin_map[cpuid], 1);
 
+	/* XXX: What's up with all the flushes? */
 	local_flush_cache_all();
 	local_flush_tlb_all();
 
@@ -111,13 +106,14 @@ void __init smp4m_callin(void)
 	atomic_inc(&init_mm.mm_count);
 	current->active_mm = &init_mm;
 
-	while(!smp_commenced)
-		barrier();
-
-	local_flush_cache_all();
-	local_flush_tlb_all();
+	while (!cpu_isset(cpuid, smp_commenced_mask))
+		mb();
 
 	local_irq_enable();
+
+	cpu_set(cpuid, cpu_online_map);
+	/* last one in gets all the interrupts (for testing) */
+	set_irq_udt(boot_cpu_id);
 }
 
 extern void init_IRQ(void);
@@ -134,102 +130,76 @@ extern unsigned long trapbase_cpu3[];
 
 void __init smp4m_boot_cpus(void)
 {
-	int cpucount = 0;
-	int i, mid;
-
-	printk("Entering SMP Mode...\n");
+	smp_setup_percpu_timer();
+	local_flush_cache_all();
+}
 
-	local_irq_enable();
-	cpus_clear(cpu_present_map);
-
-	for (i = 0; !cpu_find_by_instance(i, NULL, &mid); i++)
-		cpu_set(mid, cpu_present_map);
-
-	for(i=0; i < NR_CPUS; i++) {
-		__cpu_number_map[i] = -1;
-		__cpu_logical_map[i] = -1;
-	}
-
-	__cpu_number_map[boot_cpu_id] = 0;
-	__cpu_logical_map[0] = boot_cpu_id;
-	current_thread_info()->cpu = boot_cpu_id;
-
-	smp_store_cpu_info(boot_cpu_id);
-	set_irq_udt(boot_cpu_id);
-	smp_setup_percpu_timer();
-	local_flush_cache_all();
-	if(cpu_find_by_instance(1, NULL, NULL))
-		return;  /* Not an MP box. */
-	for(i = 0; i < NR_CPUS; i++) {
-		if(i == boot_cpu_id)
-			continue;
+int smp4m_boot_one_cpu(int i)
+{
+	extern unsigned long sun4m_cpu_startup;
+	unsigned long *entry = &sun4m_cpu_startup;
+	struct task_struct *p;
+	int timeout;
+	int cpu_node;
 
-		if (cpu_isset(i, cpu_present_map)) {
-			extern unsigned long sun4m_cpu_startup;
-			unsigned long *entry = &sun4m_cpu_startup;
-			struct task_struct *p;
-			int timeout;
+	cpu_find_by_mid(i, &cpu_node);
 
-			/* Cook up an idler for this guy. */
-			p = fork_idle(i);
-			cpucount++;
-			current_set[i] = task_thread_info(p);
-			/* See trampoline.S for details... */
-			entry += ((i-1) * 3);
+	/* Cook up an idler for this guy. */
+	p = fork_idle(i);
+	current_set[i] = task_thread_info(p);
+	/* See trampoline.S for details... */
+	entry += ((i-1) * 3);
 
-			/*
-			 * Initialize the contexts table
-			 * Since the call to prom_startcpu() trashes the structure,
-			 * we need to re-initialize it for each cpu
-			 */
-			smp_penguin_ctable.which_io = 0;
-			smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
-			smp_penguin_ctable.reg_size = 0;
+	/*
+	 * Initialize the contexts table
+	 * Since the call to prom_startcpu() trashes the structure,
+	 * we need to re-initialize it for each cpu
+	 */
+	smp_penguin_ctable.which_io = 0;
+	smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
+	smp_penguin_ctable.reg_size = 0;
 
-			/* whirrr, whirrr, whirrrrrrrrr... */
-			printk("Starting CPU %d at %p\n", i, entry);
-			local_flush_cache_all();
-			prom_startcpu(cpu_data(i).prom_node,
-				      &smp_penguin_ctable, 0, (char *)entry);
+	/* whirrr, whirrr, whirrrrrrrrr... */
+	printk("Starting CPU %d at %p\n", i, entry);
+	local_flush_cache_all();
+	prom_startcpu(cpu_node,
+		      &smp_penguin_ctable, 0, (char *)entry);
 
-			/* wheee... it's going... */
-			for(timeout = 0; timeout < 10000; timeout++) {
-				if(cpu_callin_map[i])
-					break;
-				udelay(200);
-			}
-			if(cpu_callin_map[i]) {
-				/* Another "Red Snapper". */
-				__cpu_number_map[i] = i;
-				__cpu_logical_map[i] = i;
-			} else {
-				cpucount--;
-				printk("Processor %d is stuck.\n", i);
-			}
-		}
-		if(!(cpu_callin_map[i])) {
-			cpu_clear(i, cpu_present_map);
-			__cpu_number_map[i] = -1;
+	/* wheee... it's going... */
+	for(timeout = 0; timeout < 10000; timeout++) {
+		if(cpu_callin_map[i])
+			break;
+		udelay(200);
+	}
+
+	if (!(cpu_callin_map[i])) {
+		printk("Processor %d is stuck.\n", i);
+		return -ENODEV;
+	}
+
+	local_flush_cache_all();
+	return 0;
+}
+
+void __init smp4m_smp_done(void)
+{
+	int i, first;
+	int *prev;
+
+	/* setup cpu list for irq rotation */
+	first = 0;
+	prev = &first;
+	for (i = 0; i < NR_CPUS; i++) {
+		if (cpu_online(i)) {
+			*prev = i;
+			prev = &cpu_data(i).next;
 		}
 	}
+	*prev = first;
 	local_flush_cache_all();
-	if(cpucount == 0) {
-		printk("Error: only one Processor found.\n");
-		cpu_present_map = cpumask_of_cpu(smp_processor_id());
-	} else {
-		unsigned long bogosum = 0;
-		for_each_present_cpu(i)
-			bogosum += cpu_data(i).udelay_val;
-		printk("Total of %d Processors activated (%lu.%02lu BogoMIPS).\n",
-		       cpucount + 1,
-		       bogosum/(500000/HZ),
-		       (bogosum/(5000/HZ))%100);
-		smp_activated = 1;
-		smp_num_cpus = cpucount + 1;
-	}
 
 	/* Free unneeded trap tables */
-	if (!cpu_isset(i, cpu_present_map)) {
+	if (!cpu_isset(1, cpu_present_map)) {
 		ClearPageReserved(virt_to_page(trapbase_cpu1));
 		init_page_count(virt_to_page(trapbase_cpu1));
 		free_page((unsigned long)trapbase_cpu1);
@@ -263,6 +233,9 @@ void __init smp4m_boot_cpus(void)
  */
 void smp4m_irq_rotate(int cpu)
 {
+	int next = cpu_data(cpu).next;
+	if (next != cpu)
+		set_irq_udt(next);
 }
 
 /* Cross calls, in order to work efficiently and atomically do all
@@ -289,7 +262,7 @@ void smp4m_message_pass(int target, int msg, unsigned long data, int wait)
 
 	smp_cpu_in_msg[me]++;
 	if(target == MSG_ALL_BUT_SELF || target == MSG_ALL) {
-		mask = cpu_present_map;
+		mask = cpu_online_map;
 		if(target == MSG_ALL_BUT_SELF)
 			cpu_clear(me, mask);
 		for(i = 0; i < 4; i++) {
@@ -314,8 +287,8 @@ static struct smp_funcall {
 	unsigned long arg3;
 	unsigned long arg4;
 	unsigned long arg5;
-	unsigned long processors_in[NR_CPUS];  /* Set when ipi entered. */
-	unsigned long processors_out[NR_CPUS]; /* Set when ipi exited. */
+	unsigned long processors_in[SUN4M_NCPUS];  /* Set when ipi entered. */
+	unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
 } ccall_info;
 
 static DEFINE_SPINLOCK(cross_call_lock);
@@ -324,8 +297,7 @@ static DEFINE_SPINLOCK(cross_call_lock);
 void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 		    unsigned long arg3, unsigned long arg4, unsigned long arg5)
 {
-	if(smp_processors_ready) {
-		register int ncpus = smp_num_cpus;
+		register int ncpus = SUN4M_NCPUS;
 		unsigned long flags;
 
 		spin_lock_irqsave(&cross_call_lock, flags);
@@ -340,7 +312,7 @@ void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 
 		/* Init receive/complete mapping, plus fire the IPI's off. */
 		{
-			cpumask_t mask = cpu_present_map;
+			cpumask_t mask = cpu_online_map;
 			register int i;
 
 			cpu_clear(smp_processor_id(), mask);
@@ -373,7 +345,6 @@ void smp4m_cross_call(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 		}
 
 		spin_unlock_irqrestore(&cross_call_lock, flags);
-	}
 }
 
 /* Running cross calls. */
```
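smp4m_smp_done() threads every online CPU into a circular list through the new cpu_data(i).next field, and smp4m_irq_rotate() follows it, so interrupt rotation via set_irq_udt() no longer assumes CPU ids are contiguous (the 0,2 case from the commit message). A standalone sketch of the ring construction, with a plain array standing in for cpu_data():

```c
#define NCPUS 4

static int next_cpu[NCPUS];	/* stands in for cpu_data(i).next */

/* Build the rotation ring the way smp4m_smp_done() does: each
 * online CPU points at the next online one, and the last points
 * back at the first. */
static void build_rotation_ring(const int online[NCPUS])
{
	int i, first = 0;
	int *prev = &first;

	for (i = 0; i < NCPUS; i++) {
		if (online[i]) {
			*prev = i;
			prev = &next_cpu[i];
		}
	}
	*prev = first;	/* close the ring */
}
```

With only CPUs 0 and 2 online this yields next_cpu[0] == 2 and next_cpu[2] == 0, so rotation skips the missing CPU 1 entirely.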
```diff
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 27b0e0ba8581..58c65cc8d0d3 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -1302,7 +1302,12 @@ void __init srmmu_paging_init(void)
 
 	flush_cache_all();
 	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
+#ifdef CONFIG_SMP
+	/* Stop from hanging here... */
+	local_flush_tlb_all();
+#else
 	flush_tlb_all();
+#endif
 	poke_srmmu();
 
 #ifdef CONFIG_SUN_IO
@@ -1419,6 +1424,7 @@ static void __init init_vac_layout(void)
 			max_size = vac_cache_size;
 		if(vac_line_size < min_line_size)
 			min_line_size = vac_line_size;
+		//FIXME: cpus not contiguous!!
 		cpu++;
 		if (cpu >= NR_CPUS || !cpu_online(cpu))
 			break;
```
```diff
diff --git a/include/asm-sparc/cpudata.h b/include/asm-sparc/cpudata.h
index ec0d9ef90a3b..a2c4d51d36c4 100644
--- a/include/asm-sparc/cpudata.h
+++ b/include/asm-sparc/cpudata.h
@@ -18,6 +18,7 @@ typedef struct {
 	unsigned int counter;
 	int prom_node;
 	int mid;
+	int next;
 } cpuinfo_sparc;
 
 DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
```
```diff
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index 580c51d011df..98c46e3fbe8a 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -81,16 +81,9 @@ static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
 	return 0;
 }
 
-extern __volatile__ int __cpu_number_map[NR_CPUS];
-extern __volatile__ int __cpu_logical_map[NR_CPUS];
-
 static inline int cpu_logical_map(int cpu)
 {
-	return __cpu_logical_map[cpu];
-}
-static inline int cpu_number_map(int cpu)
-{
-	return __cpu_number_map[cpu];
+	return cpu;
 }
 
 static inline int hard_smp4m_processor_id(void)
```
```diff
diff --git a/include/asm-sparc/spinlock.h b/include/asm-sparc/spinlock.h
index e344c98a6f5f..3350c90c7869 100644
--- a/include/asm-sparc/spinlock.h
+++ b/include/asm-sparc/spinlock.h
@@ -94,7 +94,7 @@ static inline void __read_lock(raw_rwlock_t *rw)
 #define __raw_read_lock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__raw_read_lock(lock); \
+	__read_lock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
@@ -114,11 +114,11 @@ static inline void __read_unlock(raw_rwlock_t *rw)
 #define __raw_read_unlock(lock) \
 do {	unsigned long flags; \
 	local_irq_save(flags); \
-	__raw_read_unlock(lock); \
+	__read_unlock(lock); \
 	local_irq_restore(flags); \
 } while(0)
 
-extern __inline__ void __raw_write_lock(raw_rwlock_t *rw)
+static inline void __raw_write_lock(raw_rwlock_t *rw)
 {
 	register raw_rwlock_t *lp asm("g1");
 	lp = rw;
@@ -131,9 +131,28 @@ extern __inline__ void __raw_write_lock(raw_rwlock_t *rw)
 	: "g2", "g4", "memory", "cc");
 }
 
+static inline int __raw_write_trylock(raw_rwlock_t *rw)
+{
+	unsigned int val;
+
+	__asm__ __volatile__("ldstub [%1 + 3], %0"
+			     : "=r" (val)
+			     : "r" (&rw->lock)
+			     : "memory");
+
+	if (val == 0) {
+		val = rw->lock & ~0xff;
+		if (val)
+			((volatile u8*)&rw->lock)[3] = 0;
+	}
+
+	return (val == 0);
+}
+
 #define __raw_write_unlock(rw)	do { (rw)->lock = 0; } while(0)
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
+#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)
 
 #endif /* !(__ASSEMBLY__) */
 
```
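The new __raw_write_trylock() builds on the one atomic operation sparc32 guarantees everywhere, ldstub: it atomically reads a byte and stores 0xff into it. The trylock claims the writer byte (byte 3, the least significant byte of the big-endian lock word), then checks the upper reader bytes and rolls the claim back if readers are still in. A commented restatement of that logic under those assumptions, using a GCC builtin in place of the ldstub instruction (a sketch only; it is not portable to little-endian hosts, where byte 3 would not be the low byte of the word):

```c
/* rw->lock layout assumed: reader counts in the upper three bytes,
 * writer byte at the low end (index 3 in big-endian byte order). */
static inline int write_trylock_sketch(volatile unsigned int *lock)
{
	unsigned int val;

	/* ldstub [lock + 3]: atomically fetch the writer byte and
	 * store 0xff into it; val is the byte's previous value. */
	val = __atomic_exchange_n((volatile unsigned char *)lock + 3,
				  0xff, __ATOMIC_ACQUIRE);
	if (val == 0) {
		/* Writer byte claimed. Any nonzero reader bytes mean
		 * readers still hold the lock, so back the claim out. */
		val = *lock & ~0xffu;
		if (val)
			((volatile unsigned char *)lock)[3] = 0;
	}
	return val == 0;	/* 1 only if claimed with no readers */
}
```

The read-side trylock, by contrast, is simply delegated to the generic spin-based fallback via the new __raw_read_trylock() define.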
