author     David S. Miller <davem@sunset.davemloft.net>  2007-10-14 02:41:28 -0400
committer  David S. Miller <davem@sunset.davemloft.net>  2007-10-14 02:41:28 -0400
commit     771823007fa2acb15a726e7e1f8502d4f350b1fa (patch)
tree       ea8e7f8ad464436e77b7a46a9c1fbd27fa4ffee6
parent     bb74b734a68544ad8f1537a33a7e22c73c87ed1c (diff)
[SPARC64]: Kill ugly __bucket() macro.
All the users go through virt_irq_to_bucket() and essentially
want to go from a virt_irq to an INO, but we have a way
to do that already via virt_to_real_irq_table[].dev_ino.
This also allows us to kill both virt_to_real_irq() and
virt_irq_to_bucket().
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--  arch/sparc64/kernel/irq.c  238
1 file changed, 92 insertions, 146 deletions
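For context, here is a minimal sketch of the lookup this patch removes versus the one it keeps. It is illustrative only, not the kernel code: the table-entry struct name and the array sizes are made up for the sketch, while virt_to_real_irq_table, dev_ino, struct ino_bucket and ivector_table are the identifiers the patch itself uses.

/* Hypothetical, simplified types -- layout sketched from the patch context. */
struct ino_bucket {
	unsigned long	__irq_chain_pa;		/* the real struct has more fields */
};

struct virt_irq_entry {				/* struct name is illustrative */
	unsigned long	dev_handle;
	unsigned int	dev_ino;
	unsigned int	irq;			/* bucket address, as cast by __bucket() */
};

static struct virt_irq_entry virt_to_real_irq_table[256];	/* size illustrative */
static struct ino_bucket ivector_table[1024];			/* size illustrative */

/* Before: cast the stored "real irq" back to a bucket pointer, then turn the
 * pointer back into an INO by taking its offset within ivector_table[].
 */
static unsigned int ino_from_virt_irq_old(unsigned int virt_irq)
{
	struct ino_bucket *bucket =
		(struct ino_bucket *) virt_to_real_irq_table[virt_irq].irq;

	return bucket ? bucket - &ivector_table[0] : 0;
}

/* After: the INO is already recorded next to the handle, so read it directly. */
static unsigned int ino_from_virt_irq_new(unsigned int virt_irq)
{
	return virt_to_real_irq_table[virt_irq].dev_ino;
}

With the INO (and, for virtual interrupts, the dev_handle/dev_ino pair) available straight from the table, the sun4v handlers below can drop their NULL-bucket checks, which is where most of the deleted lines come from.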
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index f8f704524ed9..2be6bcbe50ca 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -122,7 +122,6 @@ static void bucket_set_virt_irq(unsigned long bucket_pa,
			       "i" (ASI_PHYS_USE_EC));
 }
 
-#define __bucket(irq) ((struct ino_bucket *)(irq))
 #define __irq(bucket) ((unsigned long)(bucket))
 
 #define irq_work_pa(__cpu)	&(trap_block[(__cpu)].irq_worklist_pa)
@@ -179,11 +178,6 @@ void virt_irq_free(unsigned int virt_irq)
 }
 #endif
 
-static unsigned long virt_to_real_irq(unsigned char virt_irq)
-{
-	return virt_to_real_irq_table[virt_irq].irq;
-}
-
 /*
  * /proc/interrupts printing:
  */
@@ -270,17 +264,6 @@ struct irq_handler_data {
	void		*pre_handler_arg2;
 };
 
-static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
-{
-	unsigned long real_irq = virt_to_real_irq(virt_irq);
-	struct ino_bucket *bucket = NULL;
-
-	if (likely(real_irq))
-		bucket = __bucket(real_irq);
-
-	return bucket;
-}
-
 #ifdef CONFIG_SMP
 static int irq_choose_cpu(unsigned int virt_irq)
 {
@@ -380,178 +363,142 @@ static void sun4u_irq_end(unsigned int virt_irq)
 
 static void sun4v_irq_enable(unsigned int virt_irq)
 {
-	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-	unsigned int ino = bucket - &ivector_table[0];
+	unsigned int ino = virt_to_real_irq_table[virt_irq].dev_ino;
+	unsigned long cpuid = irq_choose_cpu(virt_irq);
+	int err;
 
-	if (likely(bucket)) {
-		unsigned long cpuid;
-		int err;
-
-		cpuid = irq_choose_cpu(virt_irq);
-
-		err = sun4v_intr_settarget(ino, cpuid);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
-			       "err(%d)\n", ino, cpuid, err);
-		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_intr_setstate(%x): "
-			       "err(%d)\n", ino, err);
-		err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
-			       ino, err);
-	}
+	err = sun4v_intr_settarget(ino, cpuid);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
+		       "err(%d)\n", ino, cpuid, err);
+	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_intr_setstate(%x): "
+		       "err(%d)\n", ino, err);
+	err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_intr_setenabled(%x): err(%d)\n",
+		       ino, err);
 }
 
 static void sun4v_set_affinity(unsigned int virt_irq, cpumask_t mask)
 {
-	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-	unsigned int ino = bucket - &ivector_table[0];
+	unsigned int ino = virt_to_real_irq_table[virt_irq].dev_ino;
+	unsigned long cpuid = irq_choose_cpu(virt_irq);
+	int err;
 
-	if (likely(bucket)) {
-		unsigned long cpuid;
-		int err;
-
-		cpuid = irq_choose_cpu(virt_irq);
-
-		err = sun4v_intr_settarget(ino, cpuid);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
-			       "err(%d)\n", ino, cpuid, err);
-	}
+	err = sun4v_intr_settarget(ino, cpuid);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_intr_settarget(%x,%lu): "
+		       "err(%d)\n", ino, cpuid, err);
 }
 
 static void sun4v_irq_disable(unsigned int virt_irq)
 {
-	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-	unsigned int ino = bucket - &ivector_table[0];
+	unsigned int ino = virt_to_real_irq_table[virt_irq].dev_ino;
+	int err;
 
-	if (likely(bucket)) {
-		int err;
-
-		err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_intr_setenabled(%x): "
-			       "err(%d)\n", ino, err);
-	}
+	err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_intr_setenabled(%x): "
+		       "err(%d)\n", ino, err);
 }
 
 static void sun4v_irq_end(unsigned int virt_irq)
 {
-	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-	unsigned int ino = bucket - &ivector_table[0];
+	unsigned int ino = virt_to_real_irq_table[virt_irq].dev_ino;
 	struct irq_desc *desc = irq_desc + virt_irq;
+	int err;
 
 	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
 		return;
 
-	if (likely(bucket)) {
-		int err;
-
-		err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_intr_setstate(%x): "
-			       "err(%d)\n", ino, err);
-	}
+	err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_intr_setstate(%x): "
+		       "err(%d)\n", ino, err);
 }
 
 static void sun4v_virq_enable(unsigned int virt_irq)
 {
-	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-
-	if (likely(bucket)) {
-		unsigned long cpuid, dev_handle, dev_ino;
-		int err;
-
-		cpuid = irq_choose_cpu(virt_irq);
-
-		dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
-		dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
-		err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
-			       "err(%d)\n",
-			       dev_handle, dev_ino, cpuid, err);
-		err = sun4v_vintr_set_state(dev_handle, dev_ino,
-					    HV_INTR_STATE_IDLE);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
-			       "HV_INTR_STATE_IDLE): err(%d)\n",
-			       dev_handle, dev_ino, err);
-		err = sun4v_vintr_set_valid(dev_handle, dev_ino,
-					    HV_INTR_ENABLED);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
-			       "HV_INTR_ENABLED): err(%d)\n",
-			       dev_handle, dev_ino, err);
-	}
+	unsigned long cpuid, dev_handle, dev_ino;
+	int err;
+
+	cpuid = irq_choose_cpu(virt_irq);
+
+	dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+	dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
+
+	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+		       "err(%d)\n",
+		       dev_handle, dev_ino, cpuid, err);
+	err = sun4v_vintr_set_state(dev_handle, dev_ino,
+				    HV_INTR_STATE_IDLE);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+		       "HV_INTR_STATE_IDLE): err(%d)\n",
+		       dev_handle, dev_ino, err);
+	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
+				    HV_INTR_ENABLED);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+		       "HV_INTR_ENABLED): err(%d)\n",
+		       dev_handle, dev_ino, err);
 }
 
 static void sun4v_virt_set_affinity(unsigned int virt_irq, cpumask_t mask)
 {
-	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-
-	if (likely(bucket)) {
-		unsigned long cpuid, dev_handle, dev_ino;
-		int err;
+	unsigned long cpuid, dev_handle, dev_ino;
+	int err;
 
-		cpuid = irq_choose_cpu(virt_irq);
-
-		dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
-		dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
-		err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
-			       "err(%d)\n",
-			       dev_handle, dev_ino, cpuid, err);
-	}
+	cpuid = irq_choose_cpu(virt_irq);
+
+	dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+	dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
+
+	err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_vintr_set_target(%lx,%lx,%lu): "
+		       "err(%d)\n",
+		       dev_handle, dev_ino, cpuid, err);
 }
 
 static void sun4v_virq_disable(unsigned int virt_irq)
 {
-	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
+	unsigned long dev_handle, dev_ino;
+	int err;
 
-	if (likely(bucket)) {
-		unsigned long dev_handle, dev_ino;
-		int err;
+	dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+	dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
 
-		dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
-		dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
-		err = sun4v_vintr_set_valid(dev_handle, dev_ino,
-					    HV_INTR_DISABLED);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
-			       "HV_INTR_DISABLED): err(%d)\n",
-			       dev_handle, dev_ino, err);
-	}
+	err = sun4v_vintr_set_valid(dev_handle, dev_ino,
+				    HV_INTR_DISABLED);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+		       "HV_INTR_DISABLED): err(%d)\n",
+		       dev_handle, dev_ino, err);
 }
 
 static void sun4v_virq_end(unsigned int virt_irq)
 {
-	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
 	struct irq_desc *desc = irq_desc + virt_irq;
+	unsigned long dev_handle, dev_ino;
+	int err;
 
 	if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)))
 		return;
 
-	if (likely(bucket)) {
-		unsigned long dev_handle, dev_ino;
-		int err;
+	dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
+	dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
 
-		dev_handle = virt_to_real_irq_table[virt_irq].dev_handle;
-		dev_ino = virt_to_real_irq_table[virt_irq].dev_ino;
-
-		err = sun4v_vintr_set_state(dev_handle, dev_ino,
-					    HV_INTR_STATE_IDLE);
-		if (err != HV_EOK)
-			printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
-			       "HV_INTR_STATE_IDLE): err(%d)\n",
-			       dev_handle, dev_ino, err);
-	}
+	err = sun4v_vintr_set_state(dev_handle, dev_ino,
+				    HV_INTR_STATE_IDLE);
+	if (err != HV_EOK)
+		printk(KERN_ERR "sun4v_vintr_set_state(%lx,%lx,"
+		       "HV_INTR_STATE_IDLE): err(%d)\n",
+		       dev_handle, dev_ino, err);
 }
 
 static void run_pre_handler(unsigned int virt_irq)
@@ -749,11 +696,10 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
 
 void ack_bad_irq(unsigned int virt_irq)
 {
-	struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
-	unsigned int ino = 0xdeadbeef;
+	unsigned int ino = virt_to_real_irq_table[virt_irq].dev_ino;
 
-	if (bucket)
-		ino = bucket - &ivector_table[0];
+	if (!ino)
+		ino = 0xdeadbeef;
 
 	printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
 	       ino, virt_irq);