 drivers/ata/sata_mv.c | 165 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 165 insertions(+), 0 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 1991eb22e388..b948dc866e04 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -545,6 +545,8 @@ static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
 static int mv_softreset(struct ata_link *link, unsigned int *class,
 			unsigned long deadline);
 static void mv_pmp_error_handler(struct ata_port *ap);
+static void mv_process_crpb_entries(struct ata_port *ap,
+					struct mv_port_priv *pp);
 
 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
  * because we have to allow room for worst case splitting of
@@ -1156,6 +1158,10 @@ static void mv_config_fbs(void __iomem *port_mmio, int want_ncq, int want_fbs)
 	if (want_fbs) {
 		new_fiscfg = old_fiscfg | FISCFG_SINGLE_SYNC;
 		new_ltmode = old_ltmode | LTMODE_BIT8;
+		if (want_ncq)
+			new_haltcond &= ~EDMA_ERR_DEV;
+		else
+			new_fiscfg |= FISCFG_WAIT_DEV_ERR;
 	}
 
 	if (new_fiscfg != old_fiscfg)
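
[Reviewer's note] The new branch above is the enabling half of the delayed-EH scheme added later in this patch: with NCQ, EDMA_ERR_DEV is removed from the EDMA halt conditions so a device error no longer stops EDMA (it must keep running to drain the other PMP links); without NCQ, the FIS configuration is told to wait out device errors instead. A minimal standalone sketch of the effect, with hypothetical bit values:

#include <stdio.h>

#define EDMA_ERR_DEV		(1u << 2)	/* hypothetical bit position */
#define FISCFG_WAIT_DEV_ERR	(1u << 8)	/* hypothetical bit position */

int main(void)
{
	unsigned int new_haltcond = ~0u;	/* halt on everything */
	unsigned int new_fiscfg = 0;
	int want_ncq = 1;

	if (want_ncq)
		new_haltcond &= ~EDMA_ERR_DEV;	/* keep EDMA running on dev err */
	else
		new_fiscfg |= FISCFG_WAIT_DEV_ERR;	/* wait out the dev err */

	printf("haltcond=%#x fiscfg=%#x\n", new_haltcond, new_fiscfg);
	return 0;
}
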
@@ -1627,6 +1633,154 @@ static void mv_pmp_error_handler(struct ata_port *ap)
 	sata_pmp_error_handler(ap);
 }
 
+static unsigned int mv_get_err_pmp_map(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+
+	return readl(port_mmio + SATA_TESTCTL_OFS) >> 16;
+}
+
+static int mv_count_pmp_links(unsigned int pmp_map)
+{
+	unsigned int link_count = 0;
+
+	while (pmp_map) {
+		link_count += (pmp_map & 1);
+		pmp_map >>= 1;
+	}
+	return link_count;
+}
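
[Reviewer's note] mv_count_pmp_links() is a plain population count over the 16-bit PMP error map; in-kernel it could presumably be replaced by hweight16() from <linux/bitops.h>. A standalone equivalent using the clear-lowest-set-bit idiom, for illustration only:

#include <assert.h>

static int count_pmp_links(unsigned int pmp_map)
{
	int n = 0;

	while (pmp_map) {
		pmp_map &= pmp_map - 1;	/* clear lowest set bit */
		n++;
	}
	return n;
}

int main(void)
{
	assert(count_pmp_links(0x0000) == 0);
	assert(count_pmp_links(0x0012) == 2);	/* PMP links 1 and 4 */
	assert(count_pmp_links(0x800f) == 5);
	return 0;
}
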
+
+static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
+{
+	struct ata_eh_info *ehi;
+	unsigned int pmp;
+
+	/*
+	 * Initialize EH info for PMPs which saw device errors
+	 */
+	ehi = &ap->link.eh_info;
+	for (pmp = 0; pmp_map != 0; pmp++) {
+		unsigned int this_pmp = (1 << pmp);
+		if (pmp_map & this_pmp) {
+			struct ata_link *link = &ap->pmp_link[pmp];
+
+			pmp_map &= ~this_pmp;
+			ehi = &link->eh_info;
+			ata_ehi_clear_desc(ehi);
+			ata_ehi_push_desc(ehi, "dev err");
+			ehi->err_mask |= AC_ERR_DEV;
+			ehi->action |= ATA_EH_RESET;
+			ata_link_abort(link);
+		}
+	}
+}
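
[Reviewer's note] The loop in mv_pmp_eh_prep() visits each set bit of pmp_map exactly once, lowest first, mapping bit n to ap->pmp_link[n] and aborting that link with a reset scheduled. A toy walk-through of the same bit-walk (demo only, not driver code):

#include <stdio.h>

int main(void)
{
	unsigned int pmp_map = 0x0012;	/* links 1 and 4 saw device errors */
	unsigned int pmp;

	for (pmp = 0; pmp_map != 0; pmp++) {
		unsigned int this_pmp = 1u << pmp;

		if (pmp_map & this_pmp) {
			pmp_map &= ~this_pmp;
			printf("abort + ATA_EH_RESET on pmp_link[%u]\n", pmp);
		}
	}
	return 0;
}
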
+
+static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
+{
+	struct mv_port_priv *pp = ap->private_data;
+	int failed_links;
+	unsigned int old_map, new_map;
+
+	/*
+	 * Device error during FBS+NCQ operation:
+	 *
+	 * Set a port flag to prevent further I/O being enqueued.
+	 * Leave the EDMA running to drain outstanding commands from this port.
+	 * Perform the post-mortem/EH only when all responses are complete.
+	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.2).
+	 */
+	if (!(pp->pp_flags & MV_PP_FLAG_DELAYED_EH)) {
+		pp->pp_flags |= MV_PP_FLAG_DELAYED_EH;
+		pp->delayed_eh_pmp_map = 0;
+	}
+	old_map = pp->delayed_eh_pmp_map;
+	new_map = old_map | mv_get_err_pmp_map(ap);
+
+	if (old_map != new_map) {
+		pp->delayed_eh_pmp_map = new_map;
+		mv_pmp_eh_prep(ap, new_map & ~old_map);
+	}
+	failed_links = mv_count_pmp_links(new_map);
+
+	ata_port_printk(ap, KERN_INFO, "%s: pmp_map=%04x qc_map=%04x "
+			"failed_links=%d nr_active_links=%d\n",
+			__func__, pp->delayed_eh_pmp_map,
+			ap->qc_active, failed_links,
+			ap->nr_active_links);
+
+	if (ap->nr_active_links <= failed_links) {
+		mv_process_crpb_entries(ap, pp);
+		mv_stop_edma(ap);
+		mv_eh_freeze(ap);
+		ata_port_printk(ap, KERN_INFO, "%s: done\n", __func__);
+		return 1;	/* handled */
+	}
+	ata_port_printk(ap, KERN_INFO, "%s: waiting\n", __func__);
+	return 1;	/* handled */
+}
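
[Reviewer's note] The drain test is the heart of the delayed-EH scheme: EDMA stays enabled until every still-active link is also a failed link, i.e. until no outstanding command could still complete normally. A standalone illustration with hypothetical numbers:

#include <stdio.h>

static int count_bits(unsigned int map)
{
	int n = 0;

	for (; map; map &= map - 1)	/* clear lowest set bit */
		n++;
	return n;
}

int main(void)
{
	unsigned int delayed_eh_pmp_map = 0x0012;	/* links 1 and 4 failed */
	int nr_active_links = 3;			/* links 1, 2 and 4 busy */
	int failed_links = count_bits(delayed_eh_pmp_map);

	if (nr_active_links <= failed_links)
		printf("drain complete: stop EDMA and freeze the port\n");
	else
		printf("waiting: a healthy link still has commands in flight\n");
	return 0;
}
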
+
+static int mv_handle_fbs_non_ncq_dev_err(struct ata_port *ap)
+{
+	/*
+	 * Possible future enhancement:
+	 *
+	 * FBS+non-NCQ operation is not yet implemented.
+	 * See related notes in mv_edma_cfg().
+	 *
+	 * Device error during FBS+non-NCQ operation:
+	 *
+	 * We need to snapshot the shadow registers for each failed command.
+	 * Follow recovery sequence from 6042/7042 datasheet (7.3.15.4.2.3).
+	 */
+	return 0;	/* not handled */
+}
+
+static int mv_handle_dev_err(struct ata_port *ap, u32 edma_err_cause)
+{
+	struct mv_port_priv *pp = ap->private_data;
+
+	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN))
+		return 0;	/* EDMA was not active: not handled */
+	if (!(pp->pp_flags & MV_PP_FLAG_FBS_EN))
+		return 0;	/* FBS was not active: not handled */
+
+	if (!(edma_err_cause & EDMA_ERR_DEV))
+		return 0;	/* non DEV error: not handled */
+	edma_err_cause &= ~EDMA_ERR_IRQ_TRANSIENT;
+	if (edma_err_cause & ~(EDMA_ERR_DEV | EDMA_ERR_SELF_DIS))
+		return 0;	/* other problems: not handled */
+
+	if (pp->pp_flags & MV_PP_FLAG_NCQ_EN) {
+		/*
+		 * EDMA should NOT have self-disabled for this case.
+		 * If it did, then something is wrong elsewhere,
+		 * and we cannot handle it here.
+		 */
+		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
+			ata_port_printk(ap, KERN_WARNING,
+				"%s: err_cause=0x%x pp_flags=0x%x\n",
+				__func__, edma_err_cause, pp->pp_flags);
+			return 0;	/* not handled */
+		}
+		return mv_handle_fbs_ncq_dev_err(ap);
+	} else {
+		/*
+		 * EDMA should have self-disabled for this case.
+		 * If it did not, then something is wrong elsewhere,
+		 * and we cannot handle it here.
+		 */
+		if (!(edma_err_cause & EDMA_ERR_SELF_DIS)) {
+			ata_port_printk(ap, KERN_WARNING,
+				"%s: err_cause=0x%x pp_flags=0x%x\n",
+				__func__, edma_err_cause, pp->pp_flags);
+			return 0;	/* not handled */
+		}
+		return mv_handle_fbs_non_ncq_dev_err(ap);
+	}
+	return 0;	/* not handled */
+}
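
[Reviewer's note] Summarizing mv_handle_dev_err(): after confirming that EDMA and FBS are enabled and that nothing besides EDMA_ERR_DEV (and possibly EDMA_ERR_SELF_DIS) is asserted, the self-disable bit is cross-checked against NCQ mode, since the chip is expected to behave differently in each case:

	NCQ enabled   EDMA_ERR_SELF_DIS   outcome
	-----------   -----------------   --------------------------------
	yes           clear               mv_handle_fbs_ncq_dev_err()
	yes           set                 warn, not handled (bad state)
	no            set                 mv_handle_fbs_non_ncq_dev_err()
	no            clear               warn, not handled (bad state)

Anything landing in a "not handled" row drops through to the generic error path in mv_err_intr().
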
+
 static void mv_unexpected_intr(struct ata_port *ap, int edma_was_enabled)
 {
 	struct ata_eh_info *ehi = &ap->link.eh_info;
@@ -1683,6 +1837,15 @@ static void mv_err_intr(struct ata_port *ap)
 	ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n",
 			__func__, edma_err_cause, pp->pp_flags);
 
+	if (edma_err_cause & EDMA_ERR_DEV) {
+		/*
+		 * Device errors during FIS-based switching operation
+		 * require special handling.
+		 */
+		if (mv_handle_dev_err(ap, edma_err_cause))
+			return;
+	}
+
 	qc = mv_get_active_qc(ap);
 	ata_ehi_clear_desc(ehi);
 	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
@@ -1861,6 +2024,8 @@ static void mv_port_intr(struct ata_port *ap, u32 port_cause)
 	 */
 	if (edma_was_enabled && (port_cause & DONE_IRQ)) {
 		mv_process_crpb_entries(ap, pp);
+		if (pp->pp_flags & MV_PP_FLAG_DELAYED_EH)
+			mv_handle_fbs_ncq_dev_err(ap);
 	}
 	/*
 	 * Handle chip-reported errors, or continue on to handle PIO.
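
[Reviewer's note] This last hunk closes the loop: once MV_PP_FLAG_DELAYED_EH is armed, every response-queue drain re-runs mv_handle_fbs_ncq_dev_err(), so the port freezes on whichever DONE_IRQ retires the last command belonging to a healthy link. A toy sketch of that event-driven re-check (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

static bool delayed_eh = true;	/* MV_PP_FLAG_DELAYED_EH set earlier */
static int active = 3, failed = 2;

static void done_irq(void)
{
	active--;	/* a command retires in mv_process_crpb_entries() */
	if (delayed_eh && active <= failed)
		printf("freeze port: only failed links remain active\n");
	else
		printf("still draining: active=%d failed=%d\n", active, failed);
}

int main(void)
{
	done_irq();	/* the healthy link's last command completes */
	return 0;
}
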