path: root/arch/sparc/kernel/irq.c
author    Bob Breuer <breuerr@mc.net>    2006-03-24 01:36:19 -0500
committer David S. Miller <davem@davemloft.net>    2006-03-24 01:36:19 -0500
commit    a54123e27779049d27d21e6c8adfee73aa2c0734 (patch)
tree      265849e706e4ebe3b75127ebe6e3cbfe2a78850a /arch/sparc/kernel/irq.c
parent    674a396c6d2ba0341ebdd7c1c9950f32f018e2dd (diff)
[SPARC]: Try to start getting SMP back into shape.
Todo items:
 - IRQ_INPROGRESS flag - use sparc64 irq buckets, or generic irq_desc?
 - sun4d
 - re-indent large chunks of sun4m_smp.c
 - some places assume sequential cpu numbering (i.e. 0,1 instead of 0,2)

Last I checked (with 2.6.14), random programs segfault with dual HyperSPARC.
And with SuperSPARC II's, it seems stable but will eventually die from a
write lock error (wrong lock owner or something). I haven't tried the
HyperSPARC + highmem combination recently, so that may still be a problem.

Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch/sparc/kernel/irq.c')
 -rw-r--r--  arch/sparc/kernel/irq.c | 66
 1 file changed, 34 insertions(+), 32 deletions(-)
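For orientation before the patch body: the change replaces the flat irq_action[] pointer
array with a small per-IRQ bookkeeping slot (handler chain plus a flags word) and adds an
"in progress" bit that synchronize_irq() can poll. Below is a minimal standalone sketch of
that idea, not code from the patch; the reduced irqaction struct, the dispatch_irq() and
synchronize_irq_sketch() names, the NR_IRQS value, and the stub cpu_relax() are
illustrative assumptions.

/* Standalone sketch (not the kernel code itself) of the per-IRQ state this
 * patch introduces: each IRQ slot carries its handler chain plus a flags
 * word, the dispatcher marks the slot busy while the chain runs, and
 * synchronize_irq() simply spins until the busy bit clears. */

#define NR_IRQS			16	/* illustrative size */
#define SPARC_IRQ_INPROGRESS	1

struct irqaction {
	void (*handler)(int irq, void *dev_id);
	void *dev_id;
	struct irqaction *next;
};

static struct {
	struct irqaction *action;	/* head of the (possibly shared) handler chain */
	int flags;			/* SPARC_IRQ_INPROGRESS while handlers run */
} sparc_irq[NR_IRQS];

static void cpu_relax(void) { }		/* stands in for the real barrier/pause */

/* Dispatcher side (cf. handler_irq in the patch): flag the IRQ as busy,
 * run every handler on the chain, then clear the flag. */
static void dispatch_irq(int irq)
{
	struct irqaction *action = sparc_irq[irq].action;

	sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
	for (; action; action = action->next)
		action->handler(irq, action->dev_id);
	sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
}

/* Waiter side (cf. the new synchronize_irq): poll until no handler for
 * this IRQ is currently executing on another CPU. */
static void synchronize_irq_sketch(unsigned int irq)
{
	unsigned int cpu_irq = irq & (NR_IRQS - 1);

	while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
		cpu_relax();
}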
diff --git a/arch/sparc/kernel/irq.c b/arch/sparc/kernel/irq.c
index 4c60a6ef54a9..aac8af5aae51 100644
--- a/arch/sparc/kernel/irq.c
+++ b/arch/sparc/kernel/irq.c
@@ -154,9 +154,11 @@ void (*sparc_init_timers)(irqreturn_t (*)(int, void *,struct pt_regs *)) =
 struct irqaction static_irqaction[MAX_STATIC_ALLOC];
 int static_irq_count;
 
-struct irqaction *irq_action[NR_IRQS] = {
-	[0 ... (NR_IRQS-1)] = NULL
-};
+struct {
+	struct irqaction *action;
+	int flags;
+} sparc_irq[NR_IRQS];
+#define SPARC_IRQ_INPROGRESS 1
 
 /* Used to protect the IRQ action lists */
 DEFINE_SPINLOCK(irq_action_lock);
@@ -177,7 +179,7 @@ int show_interrupts(struct seq_file *p, void *v)
 	}
 	spin_lock_irqsave(&irq_action_lock, flags);
 	if (i < NR_IRQS) {
-		action = *(i + irq_action);
+		action = sparc_irq[i].action;
 		if (!action)
 			goto out_unlock;
 		seq_printf(p, "%3d: ", i);
@@ -186,7 +188,7 @@ int show_interrupts(struct seq_file *p, void *v)
 #else
 	for_each_online_cpu(j) {
 		seq_printf(p, "%10u ",
-		    kstat_cpu(cpu_logical_map(j)).irqs[i]);
+		    kstat_cpu(j).irqs[i]);
 	}
 #endif
 	seq_printf(p, " %c %s",
@@ -207,7 +209,7 @@ out_unlock:
 void free_irq(unsigned int irq, void *dev_id)
 {
 	struct irqaction * action;
-	struct irqaction * tmp = NULL;
+	struct irqaction **actionp;
 	unsigned long flags;
 	unsigned int cpu_irq;
 
@@ -225,7 +227,8 @@ void free_irq(unsigned int irq, void *dev_id)
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
-	action = *(cpu_irq + irq_action);
+	actionp = &sparc_irq[cpu_irq].action;
+	action = *actionp;
 
 	if (!action->handler) {
 		printk("Trying to free free IRQ%d\n",irq);
@@ -235,7 +238,7 @@ void free_irq(unsigned int irq, void *dev_id)
 		for (; action; action = action->next) {
 			if (action->dev_id == dev_id)
 				break;
-			tmp = action;
+			actionp = &action->next;
 		}
 		if (!action) {
 			printk("Trying to free free shared IRQ%d\n",irq);
@@ -254,11 +257,8 @@ void free_irq(unsigned int irq, void *dev_id)
 		       irq, action->name);
 		goto out_unlock;
 	}
 
-	if (action && tmp)
-		tmp->next = action->next;
-	else
-		*(cpu_irq + irq_action) = action->next;
+	*actionp = action->next;
 
 	spin_unlock_irqrestore(&irq_action_lock, flags);
 
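Aside, on the hunk above: the patch drops the tmp "previous element" bookkeeping in
free_irq() in favour of a pointer to the link itself (actionp), so deleting the head of
the chain and deleting an interior entry become the same single store. A minimal sketch
of that idiom follows; the find_and_unlink() helper and its shape are illustrative, not
part of the patch.

#include <stddef.h>

struct irqaction {
	void *dev_id;
	struct irqaction *next;
};

/* Walk the chain via its link pointers; on a match, splice the entry out
 * with one store through the link that referenced it. */
static struct irqaction *find_and_unlink(struct irqaction **actionp, void *dev_id)
{
	struct irqaction *action;

	for (action = *actionp; action; action = *actionp) {
		if (action->dev_id == dev_id) {
			*actionp = action->next;	/* head or interior, same store */
			return action;			/* caller frees it */
		}
		actionp = &action->next;		/* advance to the next link */
	}
	return NULL;					/* no entry with this dev_id */
}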
@@ -268,7 +268,7 @@ void free_irq(unsigned int irq, void *dev_id)
 
 	kfree(action);
 
-	if (!(*(cpu_irq + irq_action)))
+	if (!sparc_irq[cpu_irq].action)
 		disable_irq(irq);
 
 out_unlock:
@@ -287,8 +287,11 @@ EXPORT_SYMBOL(free_irq);
 #ifdef CONFIG_SMP
 void synchronize_irq(unsigned int irq)
 {
-	printk("synchronize_irq says: implement me!\n");
-	BUG();
+	unsigned int cpu_irq;
+
+	cpu_irq = irq & (NR_IRQS - 1);
+	while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS)
+		cpu_relax();
 }
 #endif /* SMP */
 
@@ -299,7 +302,7 @@ void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs)
 	unsigned int cpu_irq;
 
 	cpu_irq = irq & (NR_IRQS - 1);
-	action = *(cpu_irq + irq_action);
+	action = sparc_irq[cpu_irq].action;
 
 	printk("IO device interrupt, irq = %d\n", irq);
 	printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc,
@@ -330,7 +333,8 @@ void handler_irq(int irq, struct pt_regs * regs)
 	if(irq < 10)
 		smp4m_irq_rotate(cpu);
 #endif
-	action = *(irq + irq_action);
+	action = sparc_irq[irq].action;
+	sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS;
 	kstat_cpu(cpu).irqs[irq]++;
 	do {
 		if (!action || !action->handler)
@@ -338,6 +342,7 @@ void handler_irq(int irq, struct pt_regs * regs)
 		action->handler(irq, action->dev_id, regs);
 		action = action->next;
 	} while (action);
+	sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS;
 	enable_pil_irq(irq);
 	irq_exit();
 }
@@ -389,7 +394,7 @@ int request_fast_irq(unsigned int irq,
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
-	action = *(cpu_irq + irq_action);
+	action = sparc_irq[cpu_irq].action;
 	if(action) {
 		if(action->flags & SA_SHIRQ)
 			panic("Trying to register fast irq when already shared.\n");
@@ -452,7 +457,7 @@ int request_fast_irq(unsigned int irq,
 	action->dev_id = NULL;
 	action->next = NULL;
 
-	*(cpu_irq + irq_action) = action;
+	sparc_irq[cpu_irq].action = action;
 
 	enable_irq(irq);
 
@@ -467,7 +472,7 @@ int request_irq(unsigned int irq,
 		irqreturn_t (*handler)(int, void *, struct pt_regs *),
 		unsigned long irqflags, const char * devname, void *dev_id)
 {
-	struct irqaction * action, *tmp = NULL;
+	struct irqaction * action, **actionp;
 	unsigned long flags;
 	unsigned int cpu_irq;
 	int ret;
@@ -490,20 +495,20 @@ int request_irq(unsigned int irq,
 
 	spin_lock_irqsave(&irq_action_lock, flags);
 
-	action = *(cpu_irq + irq_action);
+	actionp = &sparc_irq[cpu_irq].action;
+	action = *actionp;
 	if (action) {
-		if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
-			for (tmp = action; tmp->next; tmp = tmp->next);
-		} else {
+		if (!(action->flags & SA_SHIRQ) || !(irqflags & SA_SHIRQ)) {
 			ret = -EBUSY;
 			goto out_unlock;
 		}
-		if ((action->flags & SA_INTERRUPT) ^ (irqflags & SA_INTERRUPT)) {
+		if ((action->flags & SA_INTERRUPT) != (irqflags & SA_INTERRUPT)) {
 			printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq);
 			ret = -EBUSY;
 			goto out_unlock;
 		}
-		action = NULL;		/* Or else! */
+		for ( ; action; action = *actionp)
+			actionp = &action->next;
 	}
 
 	/* If this is flagged as statically allocated then we use our
@@ -532,10 +537,7 @@ int request_irq(unsigned int irq,
 	action->next = NULL;
 	action->dev_id = dev_id;
 
-	if (tmp)
-		tmp->next = action;
-	else
-		*(cpu_irq + irq_action) = action;
+	*actionp = action;
 
 	enable_irq(irq);
 
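And the matching insertion side that request_irq() switches to in the last two hunks:
walking the link pointers to the terminating NULL means the new action is appended with a
single store whether the chain was empty or not. A small sketch under the same
assumptions; the append_action() helper name is illustrative, not from the patch.

#include <stddef.h>

struct irqaction {
	struct irqaction *next;
};

/* Append new_action at the tail of the chain rooted at *actionp. */
static void append_action(struct irqaction **actionp, struct irqaction *new_action)
{
	struct irqaction *action;

	/* Advance actionp until it points at the terminating NULL link. */
	for (action = *actionp; action; action = *actionp)
		actionp = &action->next;

	new_action->next = NULL;
	*actionp = new_action;		/* one store, empty or non-empty chain */
}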