Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--	drivers/ata/sata_mv.c	64
1 file changed, 43 insertions, 21 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index e4d411cec79a..d995e0e15d87 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -1627,7 +1627,7 @@ static void mv_unexpected_intr(struct ata_port *ap)
  * LOCKING:
  * Inherited from caller.
  */
-static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
+static void mv_err_intr(struct ata_port *ap)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
@@ -1635,24 +1635,33 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	unsigned int action = 0, err_mask = 0;
 	struct ata_eh_info *ehi = &ap->link.eh_info;
-
-	ata_ehi_clear_desc(ehi);
+	struct ata_queued_cmd *qc;
+	int abort = 0;
 
 	/*
-	 * Read and clear the err_cause bits. This won't actually
-	 * clear for some errors (eg. SError), but we will be doing
-	 * a hard reset in those cases regardless, which *will* clear it.
+	 * Read and clear the SError and err_cause bits.
 	 */
+	sata_scr_read(&ap->link, SCR_ERROR, &serr);
+	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
+
 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);
+	ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n",
+			__func__, edma_err_cause, pp->pp_flags);
 
+	qc = mv_get_active_qc(ap);
+	ata_ehi_clear_desc(ehi);
+	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
+			  edma_err_cause, pp->pp_flags);
 	/*
 	 * All generations share these EDMA error cause bits:
 	 */
-	if (edma_err_cause & EDMA_ERR_DEV)
+	if (edma_err_cause & EDMA_ERR_DEV) {
 		err_mask |= AC_ERR_DEV;
+		action |= ATA_EH_RESET;
+		ata_ehi_push_desc(ehi, "dev error");
+	}
 	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
 			EDMA_ERR_CRQB_PAR | EDMA_ERR_CRPB_PAR |
 			EDMA_ERR_INTRL_PAR)) {
@@ -1684,13 +1693,6 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 		ata_ehi_push_desc(ehi, "EDMA self-disable");
 	}
 	if (edma_err_cause & EDMA_ERR_SERR) {
-		/*
-		 * Ensure that we read our own SCR, not a pmp link SCR:
-		 */
-		ap->ops->scr_read(ap, SCR_ERROR, &serr);
-		/*
-		 * Don't clear SError here; leave it for libata-eh:
-		 */
 		ata_ehi_push_desc(ehi, "SError=%08x", serr);
 		err_mask |= AC_ERR_ATA_BUS;
 		action |= ATA_EH_RESET;
@@ -1710,10 +1712,29 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 	else
 		ehi->err_mask |= err_mask;
 
-	if (edma_err_cause & eh_freeze_mask)
+	if (err_mask == AC_ERR_DEV) {
+		/*
+		 * Cannot do ata_port_freeze() here,
+		 * because it would kill PIO access,
+		 * which is needed for further diagnosis.
+		 */
+		mv_eh_freeze(ap);
+		abort = 1;
+	} else if (edma_err_cause & eh_freeze_mask) {
+		/*
+		 * Note to self: ata_port_freeze() calls ata_port_abort()
+		 */
 		ata_port_freeze(ap);
-	else
-		ata_port_abort(ap);
+	} else {
+		abort = 1;
+	}
+
+	if (abort) {
+		if (qc)
+			ata_link_abort(qc->dev->link);
+		else
+			ata_port_abort(ap);
+	}
 }
 
 static void mv_process_crpb_response(struct ata_port *ap,
@@ -1740,8 +1761,9 @@ static void mv_process_crpb_response(struct ata_port *ap,
 			}
 		}
 		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
-		qc->err_mask |= ac_err_mask(ata_status);
-		ata_qc_complete(qc);
+		if (!ac_err_mask(ata_status))
+			ata_qc_complete(qc);
+		/* else: leave it for mv_err_intr() */
 	} else {
 		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
 				__func__, tag);
@@ -1845,7 +1867,7 @@ static int mv_host_intr(struct ata_host *host, u32 main_irq_cause)
 		 * Handle chip-reported errors, or continue on to handle PIO.
 		 */
 		if (unlikely(port_cause & ERR_IRQ)) {
			mv_err_intr(ap, mv_get_active_qc(ap));
+			mv_err_intr(ap);
 		} else if (hc_irq_cause & (DEV_IRQ << hardport)) {
 			if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
 				struct ata_queued_cmd *qc = mv_get_active_qc(ap);
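The hunks above amount to one behavioural change: a pure device error now freezes the port with the driver's own mv_eh_freeze() and aborts the affected link, rather than calling ata_port_freeze(), so PIO access stays available for further diagnosis. The standalone sketch below (userspace C, not driver code) restates that freeze-vs-abort dispatch from the tail of mv_err_intr(); AC_ERR_OTHER, struct qc, and the printing stubs are invented stand-ins for the real libata/sata_mv definitions, kept only so the control flow compiles and runs on its own.

/*
 * Standalone sketch of the freeze-vs-abort decision this patch adds at
 * the tail of mv_err_intr().  Only the control flow mirrors the patch;
 * the constants and stub functions are stand-ins.
 */
#include <stdio.h>
#include <stdbool.h>

#define AC_ERR_DEV	0x01	/* device itself reported an error            */
#define AC_ERR_OTHER	0x02	/* stand-in for any other libata error class */

struct qc { int tag; };		/* stand-in for struct ata_queued_cmd */

static void mv_eh_freeze(void)    { printf("  mv_eh_freeze(): mask port IRQs, keep PIO usable\n"); }
static void ata_port_freeze(void) { printf("  ata_port_freeze(): freeze port (also aborts its qcs)\n"); }
static void ata_link_abort(void)  { printf("  ata_link_abort(): abort commands on the failing link\n"); }
static void ata_port_abort(void)  { printf("  ata_port_abort(): no active qc, abort the whole port\n"); }

static void dispatch(unsigned int err_mask, bool freeze_cause, struct qc *active_qc)
{
	int abort = 0;

	if (err_mask == AC_ERR_DEV) {
		/*
		 * Pure device error: the patch avoids ata_port_freeze()
		 * here because freezing would kill PIO access, which EH
		 * still needs for further diagnosis.
		 */
		mv_eh_freeze();
		abort = 1;
	} else if (freeze_cause) {
		/* ata_port_freeze() already implies ata_port_abort() */
		ata_port_freeze();
	} else {
		abort = 1;
	}

	if (abort) {
		if (active_qc)
			ata_link_abort();
		else
			ata_port_abort();
	}
}

int main(void)
{
	struct qc q = { .tag = 0 };

	printf("device error, active command present:\n");
	dispatch(AC_ERR_DEV, false, &q);

	printf("fatal EDMA cause (eh_freeze_mask hit):\n");
	dispatch(AC_ERR_OTHER, true, &q);

	printf("non-fatal cause, no active command:\n");
	dispatch(AC_ERR_OTHER, false, NULL);

	return 0;
}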