path: root/arch/sparc64/kernel/pci_sabre.c
author     David S. Miller <davem@davemloft.net>   2005-07-04 16:24:38 -0400
committer  David S. Miller <davem@davemloft.net>   2005-07-04 16:24:38 -0400
commit     088dd1f81b3577c17c4c4381696bf2105ea0e43a (patch)
tree       11fda00dc3ae5c3202c6c0bb0a22fa3235f4f101 /arch/sparc64/kernel/pci_sabre.c
parent     06326e40b7c66477d4a460bfc23c951f7b39f191 (diff)
[SPARC64]: Add support for IRQ pre-handlers.
This allows a PCI controller to shim into IRQ delivery so that DMA queues
can be drained, if necessary.

If some bus specific code needs to run before an IRQ handler is invoked,
the bus driver simply needs to set up the function pointer in
bucket->irq_info->pre_handler and the two args
bucket->irq_info->pre_handler_arg1 and pre_handler_arg2.

The Schizo PCI driver is converted over to use a pre-handler for the DMA
write-sync processing it needs when a device is behind a PCI->PCI bus
deeper than the top-level APB bridges.

While we're here, clean up all of the action allocation and handling.
Now, we allocate the irqaction as part of the bucket->irq_info area.
There is an array of 4 irqactions (for PCI irq sharing) and a bitmask
saying which entries are active.

The bucket->irq_info is allocated at build_irq() time, not at
request_irq() time.  This simplifies request_irq() and free_irq()
tremendously.

The SMP dynamic IRQ retargetting code got removed in this change too.
It has been disabled for a few months now, and we can resurrect it in the
future if we want.

Signed-off-by: David S. Miller <davem@davemloft.net>
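In terms of the interface described above, the hookup a bus driver has to
do reduces to filling in three fields of the bucket's irq_info at
IRQ-build time, using a handler with the three-argument signature shown
in the Sabre hunk below. The following is only a minimal sketch:
my_bus_wsync_handler and my_bus_set_pre_handler are hypothetical names,
and the actual drain/sync work is bus specific.

/* Hypothetical example of a bus driver hooking the pre-handler
 * interface added by this patch.  The pre-handler runs during IRQ
 * delivery, before the bucket's irqaction handlers are invoked.
 */
static void my_bus_wsync_handler(struct ino_bucket *bucket,
				 void *_arg1, void *_arg2)
{
	/* Bus specific DMA drain/sync work would go here. */
}

static void my_bus_set_pre_handler(struct ino_bucket *bucket,
				   void *arg1, void *arg2)
{
	struct irq_desc *d = bucket->irq_info;	/* allocated by build_irq() */

	d->pre_handler      = my_bus_wsync_handler;
	d->pre_handler_arg1 = arg1;
	d->pre_handler_arg2 = arg2;
}

In this commit, sabre_irq_build() does exactly this for devices behind a
non-APB bridge, passing the pci_dev and the controller's SABRE_WRSYNC
register address as the two arguments.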
Diffstat (limited to 'arch/sparc64/kernel/pci_sabre.c')
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c | 46
1 file changed, 26 insertions(+), 20 deletions(-)
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 53d333b4a4e8..52bf3431a422 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -595,6 +595,23 @@ static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
 	return ret;
 }
 
+/* When a device lives behind a bridge deeper in the PCI bus topology
+ * than APB, a special sequence must run to make sure all pending DMA
+ * transfers at the time of IRQ delivery are visible in the coherency
+ * domain by the cpu.  This sequence is to perform a read on the far
+ * side of the non-APB bridge, then perform a read of Sabre's DMA
+ * write-sync register.
+ */
+static void sabre_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
+{
+	struct pci_dev *pdev = _arg1;
+	unsigned long sync_reg = (unsigned long) _arg2;
+	u16 _unused;
+
+	pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused);
+	sabre_read(sync_reg);
+}
+
 static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
 					   struct pci_dev *pdev,
 					   unsigned int ino)
@@ -639,24 +656,14 @@ static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
 	if (pdev) {
 		struct pcidev_cookie *pcp = pdev->sysdata;
 
-		/* When a device lives behind a bridge deeper in the
-		 * PCI bus topology than APB, a special sequence must
-		 * run to make sure all pending DMA transfers at the
-		 * time of IRQ delivery are visible in the coherency
-		 * domain by the cpu.  This sequence is to perform
-		 * a read on the far side of the non-APB bridge, then
-		 * perform a read of Sabre's DMA write-sync register.
-		 *
-		 * Currently, the PCI_CONFIG register for the device
-		 * is used for this read from the far side of the bridge.
-		 */
 		if (pdev->bus->number != pcp->pbm->pci_first_busno) {
-			bucket->flags |= IBF_DMA_SYNC;
-			bucket->synctab_ent = dma_sync_reg_table_entry++;
-			dma_sync_reg_table[bucket->synctab_ent] =
-				(unsigned long) sabre_pci_config_mkaddr(
-					pcp->pbm,
-					pdev->bus->number, pdev->devfn, PCI_COMMAND);
+			struct pci_controller_info *p = pcp->pbm->parent;
+			struct irq_desc *d = bucket->irq_info;
+
+			d->pre_handler = sabre_wsync_handler;
+			d->pre_handler_arg1 = pdev;
+			d->pre_handler_arg2 = (void *)
+				p->pbm_A.controller_regs + SABRE_WRSYNC;
 		}
 	}
 	return __irq(bucket);
@@ -1626,10 +1633,9 @@ void __init sabre_init(int pnode, char *model_name)
 	 */
 	p->pbm_A.controller_regs = pr_regs[0].phys_addr;
 	p->pbm_B.controller_regs = pr_regs[0].phys_addr;
-	pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC;
 
-	printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n",
-	       p->pbm_A.controller_regs, pci_dma_wsync);
+	printk("PCI: Found SABRE, main regs at %016lx\n",
+	       p->pbm_A.controller_regs);
 
 	/* Clear interrupts */
 