author     Mark Lord <liml@rtr.ca>           2008-04-19 15:07:18 -0400
committer  Jeff Garzik <jgarzik@redhat.com>  2008-04-25 01:25:57 -0400
commit     a3718c1f230240361ed92d3e53342df0ff7efa8c (patch)
tree       c28b8fadb5f926868c6689d83ad08f669d63a948 /drivers/ata
parent     fcfb1f77cea81f74d865b4d33f2e452ffa1973e8 (diff)
sata_mv: tidy host controller interrupt handling
Tidy up host controller interrupt handling, by moving the weirdo bit shifting
from mv_interrupt() to mv_host_intr().  This lets us take advantage of the
MV_PORT_TO_SHIFT_AND_HARDPORT() macro from an earlier patch to greatly
simplify the port numbering logic.

Also, defer reading the hc_irq_cause (one per hc) until it is actually proven
to be needed.  This may save a microsecond or so per interrupt, on average
(a later patchset will further reduce unnecessary register reads throughout
the driver).

Apart from that, we still leave the actual IRQ handling logic alone.
Subsequent patches in this series will address that.

Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
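For context, the port numbering the macro hides works like this: each port owns
two bits (DONE/ERR) in the main IRQ cause register, and ports on the second host
controller are shifted one extra bit to skip bit 8.  The standalone sketch below
simply mirrors the open-coded arithmetic being removed in this patch; the
function name, the use of '%', and the demo harness are hypothetical and are not
the actual MV_PORT_TO_SHIFT_AND_HARDPORT() definition from the earlier patch.

/*
 * Illustrative sketch only: reconstructs the port -> (shift, hardport)
 * mapping from the open-coded logic removed in this patch.  The real
 * MV_PORT_TO_SHIFT_AND_HARDPORT() macro may be written differently.
 */
#include <stdio.h>

#define MV_PORTS_PER_HC	4	/* SATA ports per host controller */

static void port_to_shift_and_hardport(unsigned int port,
				       unsigned int *shift,
				       unsigned int *hardport)
{
	*hardport = port % MV_PORTS_PER_HC;	/* port within its hc, range 0..3 */
	*shift    = port * 2;			/* two IRQ-cause bits (DONE/ERR) per port */
	if (port >= MV_PORTS_PER_HC)
		*shift += 1;			/* skip bit 8 in the HC Main IRQ register */
}

int main(void)
{
	unsigned int port, shift, hardport;

	for (port = 0; port < 2 * MV_PORTS_PER_HC; port++) {
		port_to_shift_and_hardport(port, &shift, &hardport);
		printf("port %u -> shift %2u, hardport %u\n",
		       port, shift, hardport);
	}
	return 0;
}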
Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/sata_mv.c  107
1 files changed, 45 insertions, 62 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index cee78f9e9d1b..97da46a86fdd 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1693,50 +1693,48 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
  * LOCKING:
  * Inherited from caller.
  */
-static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
+static int mv_host_intr(struct ata_host *host, u32 main_cause)
 {
 	struct mv_host_priv *hpriv = host->private_data;
-	void __iomem *mmio = hpriv->base;
-	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
-	u32 hc_irq_cause;
-	int port, port0, last_port;
-
-	if (hc == 0)
-		port0 = 0;
-	else
-		port0 = MV_PORTS_PER_HC;
-
-	if (HAS_PCI(host))
-		last_port = port0 + MV_PORTS_PER_HC;
-	else
-		last_port = port0 + hpriv->n_ports;
-	/* we'll need the HC success int register in most cases */
-	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-	if (!hc_irq_cause)
-		return;
-
-	writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+	void __iomem *mmio = hpriv->base, *hc_mmio = NULL;
+	u32 hc_irq_cause = 0;
+	unsigned int handled = 0, port;
 
-	VPRINTK("ENTER, hc%u relevant=0x%08x HC IRQ cause=0x%08x\n",
-		hc, relevant, hc_irq_cause);
-
-	for (port = port0; port < last_port; port++) {
+	for (port = 0; port < hpriv->n_ports; port++) {
 		struct ata_port *ap = host->ports[port];
 		struct mv_port_priv *pp;
-		int have_err_bits, hardport, shift;
-
-		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
+		unsigned int shift, hardport, port_cause;
+		/*
+		 * When we move to the second hc, flag our cached
+		 * copies of hc_mmio (and hc_irq_cause) as invalid again.
+		 */
+		if (port == MV_PORTS_PER_HC)
+			hc_mmio = NULL;
+		/*
+		 * Do nothing if port is not interrupting or is disabled:
+		 */
+		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
+		port_cause = (main_cause >> shift) & (DONE_IRQ | ERR_IRQ);
+		if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
 			continue;
+		/*
+		 * Each hc within the host has its own hc_irq_cause register.
+		 * We defer reading it until we know we need it, right now:
+		 *
+		 * FIXME later: we don't really need to read this register
+		 * (some logic changes required below if we go that way),
+		 * because it doesn't tell us anything new.  But we do need
+		 * to write to it, outside the top of this loop,
+		 * to reset the interrupt triggers for next time.
+		 */
+		if (!hc_mmio) {
+			hc_mmio = mv_hc_base_from_port(mmio, port);
+			hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
+			writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+			handled = 1;
+		}
 
-		pp = ap->private_data;
-
-		shift = port << 1;	/* (port * 2) */
-		if (port >= MV_PORTS_PER_HC)
-			shift++;	/* skip bit 8 in the HC Main IRQ reg */
-
-		have_err_bits = ((ERR_IRQ << shift) & relevant);
-
-		if (unlikely(have_err_bits)) {
+		if (unlikely(port_cause & ERR_IRQ)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->link.active_tag);
@@ -1747,8 +1745,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 			continue;
 		}
 
-		hardport = mv_hardport_from_port(port);	/* range 0..3 */
-
+		pp = ap->private_data;
 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
 			if ((DMA_IRQ << hardport) & hc_irq_cause)
 				mv_process_crpb_entries(ap, pp);
@@ -1757,10 +1754,10 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 				mv_intr_pio(ap);
 		}
 	}
-	VPRINTK("EXIT\n");
+	return handled;
 }
 
-static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
+static int mv_pci_error(struct ata_host *host, void __iomem *mmio)
 {
 	struct mv_host_priv *hpriv = host->private_data;
 	struct ata_port *ap;
@@ -1798,6 +1795,7 @@ static void mv_pci_error(struct ata_host *host, void __iomem *mmio)
 			ata_port_freeze(ap);
 		}
 	}
+	return 1;	/* handled */
 }
 
 /**
@@ -1818,8 +1816,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 {
 	struct ata_host *host = dev_instance;
 	struct mv_host_priv *hpriv = host->private_data;
-	unsigned int hc, handled = 0, n_hcs;
-	void __iomem *mmio = hpriv->base;
+	unsigned int handled = 0;
 	u32 main_cause, main_mask;
 
 	spin_lock(&host->lock);
@@ -1829,26 +1826,12 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 	 * Deal with cases where we either have nothing pending, or have read
 	 * a bogus register value which can indicate HW removal or PCI fault.
 	 */
-	if (!(main_cause & main_mask) || (main_cause == 0xffffffffU))
-		goto out_unlock;
-
-	n_hcs = mv_get_hc_count(host->ports[0]->flags);
-
-	if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) {
-		mv_pci_error(host, mmio);
-		handled = 1;
-		goto out_unlock;	/* skip all other HC irq handling */
-	}
-
-	for (hc = 0; hc < n_hcs; hc++) {
-		u32 relevant = main_cause & (HC0_IRQ_PEND << (hc * HC_SHIFT));
-		if (relevant) {
-			mv_host_intr(host, relevant, hc);
-			handled = 1;
-		}
+	if ((main_cause & main_mask) && (main_cause != 0xffffffffU)) {
+		if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host)))
+			handled = mv_pci_error(host, hpriv->base);
+		else
+			handled = mv_host_intr(host, main_cause);
 	}
-
-out_unlock:
 	spin_unlock(&host->lock);
 	return IRQ_RETVAL(handled);
 }