aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--drivers/ata/sata_mv.c106
1 files changed, 58 insertions, 48 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 97da46a86fdd..944359256959 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1483,6 +1483,43 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
1483 return 0; 1483 return 0;
1484} 1484}
1485 1485
/*
 * mv_get_active_qc - fetch the single active, interrupt-owned qc for a port.
 *
 * Returns the qc for ap->link.active_tag, or NULL when there is no command
 * this interrupt path may complete:
 *  - NCQ mode (MV_PP_FLAG_NCQ_EN set): multiple tags may be outstanding, so
 *    no single "active" qc exists;
 *  - the active command has ATA_TFLAG_POLLING set: the polling path owns it.
 *
 * NOTE(review): callers appear to treat a NULL return as "nothing for the
 * non-EDMA interrupt handler to do" — confirm against mv_host_intr usage.
 */
1486static struct ata_queued_cmd *mv_get_active_qc(struct ata_port *ap)
1487{
1488	struct mv_port_priv *pp = ap->private_data;
1489	struct ata_queued_cmd *qc;
1490
	/* With NCQ enabled there is no single active tag to hand back. */
1491	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN)
1492		return NULL;
1493	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	/* Polled commands are not ours to complete from interrupt context. */
1494	if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1495		qc = NULL;
1496	return qc;
1497}
1498
/*
 * mv_unexpected_intr - handle a device interrupt that should not have fired.
 *
 * Called when a legacy device interrupt arrives while the port was expected
 * to be serviced via EDMA or by polling.  Records a human-readable
 * description in the EH info, flags AC_ERR_OTHER, requests a reset
 * (ATA_EH_RESET) and freezes the port so libata error handling takes over.
 */
1499static void mv_unexpected_intr(struct ata_port *ap)
1500{
1501	struct mv_port_priv *pp = ap->private_data;
1502	struct ata_eh_info *ehi = &ap->link.eh_info;
	/* Suffix describing which mode made the interrupt unexpected. */
1503	char *when = "";
1504
1505	/*
1506	 * We got a device interrupt from something that
1507	 * was supposed to be using EDMA or polling.
1508	 */
1509	ata_ehi_clear_desc(ehi);
1510	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
1511		when = " while EDMA enabled";
1512	} else {
1513		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
		/* A polled command should never raise a device interrupt here. */
1514		if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1515			when = " while polling";
1516	}
1517	ata_ehi_push_desc(ehi, "unexpected device interrupt%s", when);
1518	ehi->err_mask |= AC_ERR_OTHER;
1519	ehi->action |= ATA_EH_RESET;
	/* Freeze the port: normal operation stops until EH resets it. */
1520	ata_port_freeze(ap);
1521}
1522
1486/** 1523/**
1487 * mv_err_intr - Handle error interrupts on the port 1524 * mv_err_intr - Handle error interrupts on the port
1488 * @ap: ATA channel to manipulate 1525 * @ap: ATA channel to manipulate
@@ -1586,28 +1623,6 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
1586 ata_port_abort(ap); 1623 ata_port_abort(ap);
1587} 1624}
1588 1625
/*
 * mv_intr_pio - complete a non-polled PIO command on device interrupt.
 *
 * (Removed by this patch in favour of ata_sff_host_intr() via
 * mv_get_active_qc().)  Reads the taskfile status register; if the drive is
 * still BUSY the interrupt is treated as spurious and ignored.  Otherwise the
 * active qc is completed with an error mask derived from the status byte.
 */
1589static void mv_intr_pio(struct ata_port *ap)
1590{
1591	struct ata_queued_cmd *qc;
1592	u8 ata_status;
1593
1594	/* ignore spurious intr if drive still BUSY */
1595	ata_status = readb(ap->ioaddr.status_addr);
1596	if (unlikely(ata_status & ATA_BUSY))
1597		return;
1598
1599	/* get active ATA command */
1600	qc = ata_qc_from_tag(ap, ap->link.active_tag);
1601	if (unlikely(!qc))	/* no active tag */
1602		return;
1603	if (qc->tf.flags & ATA_TFLAG_POLLING)	/* polling; we don't own qc */
1604		return;
1605
	/* Fold the status byte into an AC_ERR_* mask and finish the command. */
1606	/* and finally, complete the ATA command */
1607	qc->err_mask |= ac_err_mask(ata_status);
1608	ata_qc_complete(qc);
1609}
1610
1611static void mv_process_crpb_response(struct ata_port *ap, 1626static void mv_process_crpb_response(struct ata_port *ap,
1612 struct mv_crpb *response, unsigned int tag, int ncq_enabled) 1627 struct mv_crpb *response, unsigned int tag, int ncq_enabled)
1613{ 1628{
@@ -1680,15 +1695,7 @@ static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp
1680/** 1695/**
1681 * mv_host_intr - Handle all interrupts on the given host controller 1696 * mv_host_intr - Handle all interrupts on the given host controller
1682 * @host: host specific structure 1697 * @host: host specific structure
1683 * @relevant: port error bits relevant to this host controller 1698 * @main_cause: Main interrupt cause register for the chip.
1684 * @hc: which host controller we're to look at
1685 *
1686 * Read then write clear the HC interrupt status then walk each
1687 * port connected to the HC and see if it needs servicing. Port
1688 * success ints are reported in the HC interrupt status reg, the
1689 * port error ints are reported in the higher level main
1690 * interrupt status register and thus are passed in via the
1691 * 'relevant' argument.
1692 * 1699 *
1693 * LOCKING: 1700 * LOCKING:
1694 * Inherited from caller. 1701 * Inherited from caller.
@@ -1733,25 +1740,28 @@ static int mv_host_intr(struct ata_host *host, u32 main_cause)
1733 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS); 1740 writelfl(~hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
1734 handled = 1; 1741 handled = 1;
1735 } 1742 }
1736 1743 /*
1737 if (unlikely(port_cause & ERR_IRQ)) { 1744 * Process completed CRPB response(s) before other events.
1738 struct ata_queued_cmd *qc; 1745 */
1739
1740 qc = ata_qc_from_tag(ap, ap->link.active_tag);
1741 if (qc && (qc->tf.flags & ATA_TFLAG_POLLING))
1742 continue;
1743
1744 mv_err_intr(ap, qc);
1745 continue;
1746 }
1747
1748 pp = ap->private_data; 1746 pp = ap->private_data;
1749 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { 1747 if (hc_irq_cause & (DMA_IRQ << hardport)) {
1750 if ((DMA_IRQ << hardport) & hc_irq_cause) 1748 if (pp->pp_flags & MV_PP_FLAG_EDMA_EN)
1751 mv_process_crpb_entries(ap, pp); 1749 mv_process_crpb_entries(ap, pp);
1752 } else { 1750 }
1753 if ((DEV_IRQ << hardport) & hc_irq_cause) 1751 /*
1754 mv_intr_pio(ap); 1752 * Handle chip-reported errors, or continue on to handle PIO.
1753 */
1754 if (unlikely(port_cause & ERR_IRQ)) {
1755 mv_err_intr(ap, mv_get_active_qc(ap));
1756 } else if (hc_irq_cause & (DEV_IRQ << hardport)) {
1757 if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
1758 struct ata_queued_cmd *qc = mv_get_active_qc(ap);
1759 if (qc) {
1760 ata_sff_host_intr(ap, qc);
1761 continue;
1762 }
1763 }
1764 mv_unexpected_intr(ap);
1755 } 1765 }
1756 } 1766 }
1757 return handled; 1767 return handled;