author		Mark Lord <liml@rtr.ca>	2008-05-02 02:13:27 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-05-06 11:37:41 -0400
commit		eabd5eb1cb59bfb162e7aa23007248f2bb480816 (patch)
tree		9ae0e9a071f13c1c2a123f66179deaaf80b4d3b1 /drivers/ata
parent		37b9046a3e433a0b0c39ad1e81ec187d5be800ba (diff)
sata_mv fix mv_host_intr bug for hc_irq_cause
Remove the unwanted reads of hc_irq_cause from mv_host_intr(), thereby removing a bug whereby we were not always reading it when needed.

Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
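For readers unfamiliar with the driver, here is a minimal, stand-alone sketch (not part of the commit) of the idea behind the fix: the per-hc ack value is derived purely from the already-available main_irq_cause, so hc_irq_cause never needs to be read. The helper name hc_ack_irqs is hypothetical, and the constant values below are illustrative stand-ins for the sata_mv macros of the same names; the real definitions live in drivers/ata/sata_mv.c, and the real loop also stops at hpriv->n_ports.

/* Illustrative only -- constants are stand-ins for the sata_mv macros. */
#include <stdio.h>

#define MV_PORTS_PER_HC	4
#define DONE_IRQ	(1U << 0)	/* main_irq_cause: shift by (2 * port) */
#define ERR_IRQ		(1U << 1)	/* main_irq_cause: shift by (2 * port) */
#define DMA_IRQ		(1U << 0)	/* hc_irq_cause: shift by port */
#define DEV_IRQ		(1U << 8)	/* hc_irq_cause: shift by port */

/*
 * Build the value used to ack hc_irq_cause from this hc's slice of
 * main_irq_cause: one (DMA_IRQ | DEV_IRQ) pair per interrupting port.
 */
static unsigned int hc_ack_irqs(unsigned int hc_cause)
{
	unsigned int p, port_mask, ack_irqs = 0;

	for (p = 0; p < MV_PORTS_PER_HC; ++p) {
		port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
		if (hc_cause & port_mask)
			ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
	}
	return ack_irqs;
}

int main(void)
{
	/* Example: ports 0 and 2 of this hc are interrupting. */
	unsigned int hc_cause = DONE_IRQ | (ERR_IRQ << 4);

	/* The driver then does: writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS); */
	printf("ack_irqs = 0x%03x\n", hc_ack_irqs(hc_cause));
	return 0;
}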
Diffstat (limited to 'drivers/ata')
-rw-r--r--	drivers/ata/sata_mv.c	71
1 file changed, 42 insertions, 29 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index d995e0e15d87..31e42deb746f 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1818,48 +1818,61 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
 static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
 {
 	struct mv_host_priv *hpriv = host->private_data;
-	void __iomem *mmio = hpriv->base, *hc_mmio = NULL;
-	u32 hc_irq_cause = 0;
+	void __iomem *mmio = hpriv->base, *hc_mmio;
 	unsigned int handled = 0, port;
 
 	for (port = 0; port < hpriv->n_ports; port++) {
 		struct ata_port *ap = host->ports[port];
 		struct mv_port_priv *pp;
-		unsigned int shift, hardport, port_cause;
-		/*
-		 * When we move to the second hc, flag our cached
-		 * copies of hc_mmio (and hc_irq_cause) as invalid again.
-		 */
-		if (port == MV_PORTS_PER_HC)
-			hc_mmio = NULL;
-		/*
-		 * Do nothing if port is not interrupting or is disabled:
-		 */
+		unsigned int p, shift, hardport, port_cause;
+
 		MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
-		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
-		if (!port_cause || !ap || (ap->flags & ATA_FLAG_DISABLED))
-			continue;
 		/*
-		 * Each hc within the host has its own hc_irq_cause register.
-		 * We defer reading it until we know we need it, right now:
-		 *
-		 * FIXME later: we don't really need to read this register
-		 * (some logic changes required below if we go that way),
-		 * because it doesn't tell us anything new. But we do need
-		 * to write to it, outside the top of this loop,
-		 * to reset the interrupt triggers for next time.
+		 * Each hc within the host has its own hc_irq_cause register,
+		 * where the interrupting ports bits get ack'd.
 		 */
-		if (!hc_mmio) {
+		if (hardport == 0) { /* first port on this hc ? */
+			u32 hc_cause = (main_irq_cause >> shift) & HC0_IRQ_PEND;
+			u32 port_mask, ack_irqs;
+			/*
+			 * Skip this entire hc if nothing pending for any ports
+			 */
+			if (!hc_cause) {
+				port += MV_PORTS_PER_HC - 1;
+				continue;
+			}
+			/*
+			 * We don't need/want to read the hc_irq_cause register,
+			 * because doing so hurts performance, and
+			 * main_irq_cause already gives us everything we need.
+			 *
+			 * But we do have to *write* to the hc_irq_cause to ack
+			 * the ports that we are handling this time through.
+			 *
+			 * This requires that we create a bitmap for those
+			 * ports which interrupted us, and use that bitmap
+			 * to ack (only) those ports via hc_irq_cause.
+			 */
+			ack_irqs = 0;
+			for (p = 0; p < MV_PORTS_PER_HC; ++p) {
+				if ((port + p) >= hpriv->n_ports)
+					break;
+				port_mask = (DONE_IRQ | ERR_IRQ) << (p * 2);
+				if (hc_cause & port_mask)
+					ack_irqs |= (DMA_IRQ | DEV_IRQ) << p;
+			}
 			hc_mmio = mv_hc_base_from_port(mmio, port);
-			hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-			writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+			writelfl(~ack_irqs, hc_mmio + HC_IRQ_CAUSE_OFS);
 			handled = 1;
 		}
+		port_cause = (main_irq_cause >> shift) & (DONE_IRQ | ERR_IRQ);
+		if (!port_cause)
+			continue;
 		/*
 		 * Process completed CRPB response(s) before other events.
 		 */
 		pp = ap->private_data;
-		if (hc_irq_cause & (DMA_IRQ << hardport)) {
+		if (port_cause & DONE_IRQ) {
 			if (pp->pp_flags & MV_PP_FLAG_EDMA_EN)
 				mv_process_crpb_entries(ap, pp);
 		}
@@ -1868,15 +1881,15 @@ static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
 		 */
 		if (unlikely(port_cause & ERR_IRQ)) {
 			mv_err_intr(ap);
-		} else if (hc_irq_cause & (DEV_IRQ << hardport)) {
+		} else {
 			if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
 				struct ata_queued_cmd *qc = mv_get_active_qc(ap);
 				if (qc) {
 					ata_sff_host_intr(ap, qc);
 					continue;
 				}
+				mv_unexpected_intr(ap);
 			}
-			mv_unexpected_intr(ap);
 		}
 	}
 	return handled;