 drivers/scsi/Makefile       |   2
 drivers/scsi/libata-core.c  | 408
 drivers/scsi/libata-eh.c    | 264
 drivers/scsi/libata-scsi.c  | 172
 drivers/scsi/libata.h       |  17
 drivers/scsi/pdc_adma.c     |   4
 drivers/scsi/sata_mv.c      |   2
 drivers/scsi/sata_nv.c      |   2
 drivers/scsi/sata_promise.c |   2
 drivers/scsi/sata_qstor.c   |   4
 drivers/scsi/sata_sil24.c   |   2
 drivers/scsi/sata_sx4.c     |   2
 drivers/scsi/sata_vsc.c     |   2
 include/linux/libata.h      |  92
 14 files changed, 692 insertions(+), 283 deletions(-)
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index e513c3158ad9..503f189dab3b 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -165,7 +165,7 @@ ncr53c8xx-flags-$(CONFIG_SCSI_ZALON) \
 CFLAGS_ncr53c8xx.o	:= $(ncr53c8xx-flags-y) $(ncr53c8xx-flags-m)
 zalon7xx-objs	:= zalon.o ncr53c8xx.o
 NCR_Q720_mod-objs	:= NCR_Q720.o ncr53c8xx.o
-libata-objs	:= libata-core.o libata-scsi.o libata-bmdma.o
+libata-objs	:= libata-core.o libata-scsi.o libata-bmdma.o libata-eh.o
 oktagon_esp_mod-objs	:= oktagon_esp.o oktagon_io.o
 
 # Files generated that shall be removed upon make clean
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c
index 597e9e8bcd2c..9de48dd4234a 100644
--- a/drivers/scsi/libata-core.c
+++ b/drivers/scsi/libata-core.c
@@ -65,7 +65,6 @@ static unsigned int ata_dev_init_params(struct ata_port *ap,
 					struct ata_device *dev,
 					u16 heads,
 					u16 sectors);
-static int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
 static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
 					 struct ata_device *dev);
 static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev);
@@ -409,7 +408,7 @@ static const char *sata_spd_string(unsigned int spd)
 	return spd_str[spd - 1];
 }
 
-static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
+void ata_dev_disable(struct ata_port *ap, struct ata_device *dev)
 {
 	if (ata_dev_enabled(dev)) {
 		printk(KERN_WARNING "ata%u: dev %u disabled\n",
@@ -961,6 +960,7 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  * @ap: Port to which the command is sent
  * @dev: Device to which the command is sent
  * @tf: Taskfile registers for the command and the result
+ * @cdb: CDB for packet command
  * @dma_dir: Data tranfer direction of the command
  * @buf: Data buffer of the command
  * @buflen: Length of data buffer
@@ -975,10 +975,9 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc)
  * None. Should be called with kernel context, might sleep.
  */
 
-static unsigned
-ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
-		  struct ata_taskfile *tf,
-		  int dma_dir, void *buf, unsigned int buflen)
+unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
+			   struct ata_taskfile *tf, const u8 *cdb,
+			   int dma_dir, void *buf, unsigned int buflen)
 {
 	u8 command = tf->command;
 	struct ata_queued_cmd *qc;
@@ -992,6 +991,8 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 	BUG_ON(qc == NULL);
 
 	qc->tf = *tf;
+	if (cdb)
+		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
 	qc->dma_dir = dma_dir;
 	if (dma_dir != DMA_NONE) {
 		ata_sg_init_one(qc, buf, buflen);
@@ -1042,7 +1043,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
 	 *
 	 * Kill the following code as soon as those drivers are fixed.
 	 */
-	if (ap->flags & ATA_FLAG_PORT_DISABLED) {
+	if (ap->flags & ATA_FLAG_DISABLED) {
 		err_mask |= AC_ERR_SYSTEM;
 		ata_port_probe(ap);
 	}
@@ -1141,7 +1142,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev,
 
 	tf.protocol = ATA_PROT_PIO;
 
-	err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE,
+	err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_FROM_DEVICE,
 				     id, sizeof(id[0]) * ATA_ID_WORDS);
 	if (err_mask) {
 		rc = -EIO;
@@ -1238,7 +1239,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev,
 		       id[84], id[85], id[86], id[87], id[88]);
 
 	/* initialize to-be-configured parameters */
-	dev->flags = 0;
+	dev->flags &= ~ATA_DFLAG_CFG_MASK;
 	dev->max_sectors = 0;
 	dev->cdb_len = 0;
 	dev->n_sectors = 0;
@@ -1381,11 +1382,18 @@ err_out_nosup:
 static int ata_bus_probe(struct ata_port *ap)
 {
 	unsigned int classes[ATA_MAX_DEVICES];
-	int i, rc, found = 0;
+	int tries[ATA_MAX_DEVICES];
+	int i, rc, down_xfermask;
 	struct ata_device *dev;
 
 	ata_port_probe(ap);
 
+	for (i = 0; i < ATA_MAX_DEVICES; i++)
+		tries[i] = ATA_PROBE_MAX_TRIES;
+
+ retry:
+	down_xfermask = 0;
+
 	/* reset and determine device classes */
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
 		classes[i] = ATA_DEV_UNKNOWN;
@@ -1399,7 +1407,7 @@ static int ata_bus_probe(struct ata_port *ap)
 	} else {
 		ap->ops->phy_reset(ap);
 
-		if (!(ap->flags & ATA_FLAG_PORT_DISABLED))
+		if (!(ap->flags & ATA_FLAG_DISABLED))
 			for (i = 0; i < ATA_MAX_DEVICES; i++)
 				classes[i] = ap->device[i].class;
 
@@ -1415,21 +1423,23 @@ static int ata_bus_probe(struct ata_port *ap)
 		dev = &ap->device[i];
 		dev->class = classes[i];
 
-		if (!ata_dev_enabled(dev))
-			continue;
-
-		WARN_ON(dev->id != NULL);
-		if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) {
-			dev->class = ATA_DEV_NONE;
-			continue;
+		if (!tries[i]) {
+			ata_down_xfermask_limit(ap, dev, 1);
+			ata_dev_disable(ap, dev);
 		}
 
-		if (ata_dev_configure(ap, dev, 1)) {
-			ata_dev_disable(ap, dev);
+		if (!ata_dev_enabled(dev))
 			continue;
-		}
 
-		found = 1;
+		kfree(dev->id);
+		dev->id = NULL;
+		rc = ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id);
+		if (rc)
+			goto fail;
+
+		rc = ata_dev_configure(ap, dev, 1);
+		if (rc)
+			goto fail;
 	}
 
 	/* configure transfer mode */
@@ -1438,12 +1448,18 @@ static int ata_bus_probe(struct ata_port *ap)
 		 * return error code and failing device on failure as
 		 * ata_set_mode() does.
 		 */
-		if (found)
-			ap->ops->set_mode(ap);
+		for (i = 0; i < ATA_MAX_DEVICES; i++)
+			if (ata_dev_enabled(&ap->device[i])) {
+				ap->ops->set_mode(ap);
+				break;
+			}
 		rc = 0;
 	} else {
-		while (ata_set_mode(ap, &dev))
-			ata_dev_disable(ap, dev);
+		rc = ata_set_mode(ap, &dev);
+		if (rc) {
+			down_xfermask = 1;
+			goto fail;
+		}
 	}
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++)
@@ -1454,6 +1470,24 @@ static int ata_bus_probe(struct ata_port *ap)
 	ata_port_disable(ap);
 	ap->ops->port_disable(ap);
 	return -ENODEV;
+
+ fail:
+	switch (rc) {
+	case -EINVAL:
+	case -ENODEV:
+		tries[dev->devno] = 0;
+		break;
+	case -EIO:
+		ata_down_sata_spd_limit(ap);
+		/* fall through */
+	default:
+		tries[dev->devno]--;
+		if (down_xfermask &&
+		    ata_down_xfermask_limit(ap, dev, tries[dev->devno] == 1))
+			tries[dev->devno] = 0;
+	}
+
+	goto retry;
 }
 
 /**
@@ -1469,7 +1503,7 @@ static int ata_bus_probe(struct ata_port *ap)
 
 void ata_port_probe(struct ata_port *ap)
 {
-	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
+	ap->flags &= ~ATA_FLAG_DISABLED;
 }
 
 /**
@@ -1543,7 +1577,7 @@ void __sata_phy_reset(struct ata_port *ap)
 	else
 		ata_port_disable(ap);
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+	if (ap->flags & ATA_FLAG_DISABLED)
 		return;
 
 	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
@@ -1568,7 +1602,7 @@ void __sata_phy_reset(struct ata_port *ap)
 void sata_phy_reset(struct ata_port *ap)
 {
 	__sata_phy_reset(ap);
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+	if (ap->flags & ATA_FLAG_DISABLED)
 		return;
 	ata_bus_reset(ap);
 }
@@ -1607,7 +1641,121 @@ void ata_port_disable(struct ata_port *ap)
 {
 	ap->device[0].class = ATA_DEV_NONE;
 	ap->device[1].class = ATA_DEV_NONE;
-	ap->flags |= ATA_FLAG_PORT_DISABLED;
+	ap->flags |= ATA_FLAG_DISABLED;
+}
+
+/**
+ * ata_down_sata_spd_limit - adjust SATA spd limit downward
+ * @ap: Port to adjust SATA spd limit for
+ *
+ * Adjust SATA spd limit of @ap downward.  Note that this
+ * function only adjusts the limit.  The change must be applied
+ * using ata_set_sata_spd().
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure
+ */
+int ata_down_sata_spd_limit(struct ata_port *ap)
+{
+	u32 spd, mask;
+	int highbit;
+
+	if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
+		return -EOPNOTSUPP;
+
+	mask = ap->sata_spd_limit;
+	if (mask <= 1)
+		return -EINVAL;
+	highbit = fls(mask) - 1;
+	mask &= ~(1 << highbit);
+
+	spd = (scr_read(ap, SCR_STATUS) >> 4) & 0xf;
+	if (spd <= 1)
+		return -EINVAL;
+	spd--;
+	mask &= (1 << spd) - 1;
+	if (!mask)
+		return -EINVAL;
+
+	ap->sata_spd_limit = mask;
+
+	printk(KERN_WARNING "ata%u: limiting SATA link speed to %s\n",
+	       ap->id, sata_spd_string(fls(mask)));
+
+	return 0;
+}
+
+static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol)
+{
+	u32 spd, limit;
+
+	if (ap->sata_spd_limit == UINT_MAX)
+		limit = 0;
+	else
+		limit = fls(ap->sata_spd_limit);
+
+	spd = (*scontrol >> 4) & 0xf;
+	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
+
+	return spd != limit;
+}
+
+/**
+ * ata_set_sata_spd_needed - is SATA spd configuration needed
+ * @ap: Port in question
+ *
+ * Test whether the spd limit in SControl matches
+ * @ap->sata_spd_limit.  This function is used to determine
+ * whether hardreset is necessary to apply SATA spd
+ * configuration.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 1 if SATA spd configuration is needed, 0 otherwise.
+ */
+int ata_set_sata_spd_needed(struct ata_port *ap)
+{
+	u32 scontrol;
+
+	if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
+		return 0;
+
+	scontrol = scr_read(ap, SCR_CONTROL);
+
+	return __ata_set_sata_spd_needed(ap, &scontrol);
+}
+
+/**
+ * ata_set_sata_spd - set SATA spd according to spd limit
+ * @ap: Port to set SATA spd for
+ *
+ * Set SATA spd of @ap according to sata_spd_limit.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 if spd doesn't need to be changed, 1 if spd has been
+ * changed.  -EOPNOTSUPP if SCR registers are inaccessible.
+ */
+static int ata_set_sata_spd(struct ata_port *ap)
+{
+	u32 scontrol;
+
+	if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read)
+		return -EOPNOTSUPP;
+
+	scontrol = scr_read(ap, SCR_CONTROL);
+	if (!__ata_set_sata_spd_needed(ap, &scontrol))
+		return 0;
+
+	scr_write(ap, SCR_CONTROL, scontrol);
+	return 1;
 }
 
 /*
@@ -1758,11 +1906,62 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed,
 	return 0;
 }
 
+/**
+ * ata_down_xfermask_limit - adjust dev xfer masks downward
+ * @ap: Port associated with device @dev
+ * @dev: Device to adjust xfer masks
+ * @force_pio0: Force PIO0
+ *
+ * Adjust xfer masks of @dev downward.  Note that this function
+ * does not apply the change.  Invoking ata_set_mode() afterwards
+ * will apply the limit.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ *
+ * RETURNS:
+ * 0 on success, negative errno on failure
+ */
+int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
+			    int force_pio0)
+{
+	unsigned long xfer_mask;
+	int highbit;
+
+	xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
+				      dev->udma_mask);
+
+	if (!xfer_mask)
+		goto fail;
+	/* don't gear down to MWDMA from UDMA, go directly to PIO */
+	if (xfer_mask & ATA_MASK_UDMA)
+		xfer_mask &= ~ATA_MASK_MWDMA;
+
+	highbit = fls(xfer_mask) - 1;
+	xfer_mask &= ~(1 << highbit);
+	if (force_pio0)
+		xfer_mask &= 1 << ATA_SHIFT_PIO;
+	if (!xfer_mask)
+		goto fail;
+
+	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
+			    &dev->udma_mask);
+
+	printk(KERN_WARNING "ata%u: dev %u limiting speed to %s\n",
+	       ap->id, dev->devno, ata_mode_string(xfer_mask));
+
+	return 0;
+
+ fail:
+	return -EINVAL;
+}
+
 static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
 {
 	unsigned int err_mask;
 	int rc;
 
+	dev->flags &= ~ATA_DFLAG_PIO;
 	if (dev->xfer_shift == ATA_SHIFT_PIO)
 		dev->flags |= ATA_DFLAG_PIO;
 
@@ -1775,12 +1974,8 @@ static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
 	}
 
 	rc = ata_dev_revalidate(ap, dev, 0);
-	if (rc) {
-		printk(KERN_ERR
-		       "ata%u: failed to revalidate after set xfermode\n",
-		       ap->id);
+	if (rc)
 		return rc;
-	}
 
 	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
 		dev->xfer_shift, (int)dev->xfer_mode);
@@ -1806,7 +2001,7 @@ static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
  * RETURNS:
  * 0 on success, negative errno otherwise
  */
-static int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
+int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
 {
 	struct ata_device *dev;
 	int i, rc = 0, used_dma = 0, found = 0;
@@ -2069,7 +2264,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap,
  * Obtains host_set lock.
  *
  * SIDE EFFECTS:
- * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
+ * Sets ATA_FLAG_DISABLED if bus reset fails.
  */
 
 void ata_bus_reset(struct ata_port *ap)
@@ -2179,7 +2374,14 @@ static int sata_phy_resume(struct ata_port *ap)
 void ata_std_probeinit(struct ata_port *ap)
 {
 	if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) {
+		u32 spd;
+
 		sata_phy_resume(ap);
+
+		spd = (scr_read(ap, SCR_CONTROL) & 0xf0) >> 4;
+		if (spd)
+			ap->sata_spd_limit &= (1 << spd) - 1;
+
 		if (sata_dev_present(ap))
 			ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 	}
@@ -2267,18 +2469,30 @@ int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class)
 
 	DPRINTK("ENTER\n");
 
-	/* Issue phy wake/reset */
+	if (ata_set_sata_spd_needed(ap)) {
+		/* SATA spec says nothing about how to reconfigure
+		 * spd.  To be on the safe side, turn off phy during
+		 * reconfiguration.  This works for at least ICH7 AHCI
+		 * and Sil3124.
+		 */
+		scontrol = scr_read(ap, SCR_CONTROL);
+		scontrol = (scontrol & 0x0f0) | 0x302;
+		scr_write_flush(ap, SCR_CONTROL, scontrol);
+
+		ata_set_sata_spd(ap);
+	}
+
+	/* issue phy wake/reset */
 	scontrol = scr_read(ap, SCR_CONTROL);
 	scontrol = (scontrol & 0x0f0) | 0x301;
 	scr_write_flush(ap, SCR_CONTROL, scontrol);
 
-	/*
-	 * Couldn't find anything in SATA I/II specs, but AHCI-1.1
+	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
 	 * 10.4.2 says at least 1 ms.
 	 */
 	msleep(1);
 
-	/* Bring phy back */
+	/* bring phy back */
 	sata_phy_resume(ap);
 
 	/* TODO: phy layer with polling, timeouts, etc. */
@@ -2385,9 +2599,9 @@ int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes)
 			       ata_std_postreset, classes);
 }
 
-static int ata_do_reset(struct ata_port *ap,
+int ata_do_reset(struct ata_port *ap,
 		 ata_reset_fn_t reset, ata_postreset_fn_t postreset,
 		 int verbose, unsigned int *classes)
 {
 	int i, rc;
 
@@ -2458,21 +2672,42 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit,
 	if (probeinit)
 		probeinit(ap);
 
-	if (softreset) {
+	if (softreset && !ata_set_sata_spd_needed(ap)) {
 		rc = ata_do_reset(ap, softreset, postreset, 0, classes);
 		if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN)
 			goto done;
+		printk(KERN_INFO "ata%u: softreset failed, will try "
+		       "hardreset in 5 secs\n", ap->id);
+		ssleep(5);
 	}
 
 	if (!hardreset)
 		goto done;
 
-	rc = ata_do_reset(ap, hardreset, postreset, 0, classes);
-	if (rc || classes[0] != ATA_DEV_UNKNOWN)
-		goto done;
+	while (1) {
+		rc = ata_do_reset(ap, hardreset, postreset, 0, classes);
+		if (rc == 0) {
+			if (classes[0] != ATA_DEV_UNKNOWN)
+				goto done;
+			break;
+		}
+
+		if (ata_down_sata_spd_limit(ap))
+			goto done;
+
+		printk(KERN_INFO "ata%u: hardreset failed, will retry "
+		       "in 5 secs\n", ap->id);
+		ssleep(5);
+	}
+
+	if (softreset) {
+		printk(KERN_INFO "ata%u: hardreset succeeded without "
+		       "classification, will retry softreset in 5 secs\n",
+		       ap->id);
+		ssleep(5);
 
-	if (softreset)
 		rc = ata_do_reset(ap, softreset, postreset, 0, classes);
+	}
 
  done:
 	if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN)
@@ -2560,15 +2795,14 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev,
 int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
 		       int post_reset)
 {
-	unsigned int class;
-	u16 *id;
+	unsigned int class = dev->class;
+	u16 *id = NULL;
 	int rc;
 
-	if (!ata_dev_enabled(dev))
-		return -ENODEV;
-
-	class = dev->class;
-	id = NULL;
+	if (!ata_dev_enabled(dev)) {
+		rc = -ENODEV;
+		goto fail;
+	}
 
 	/* allocate & read ID data */
 	rc = ata_dev_read_id(ap, dev, &class, post_reset, &id);
@@ -2585,7 +2819,9 @@ int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev,
 	dev->id = id;
 
 	/* configure device according to the new ID */
-	return ata_dev_configure(ap, dev, 0);
+	rc = ata_dev_configure(ap, dev, 0);
+	if (rc == 0)
+		return 0;
 
  fail:
 	printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n",
@@ -2687,23 +2923,34 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
 	unsigned long xfer_mask;
 	int i;
 
-	xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
-				      ap->udma_mask);
+	xfer_mask = ata_pack_xfermask(ap->pio_mask,
+				      ap->mwdma_mask, ap->udma_mask);
+
+	/* Apply cable rule here.  Don't apply it early because when
+	 * we handle hot plug the cable type can itself change.
+	 */
+	if (ap->cbl == ATA_CBL_PATA40)
+		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 
 	/* FIXME: Use port-wide xfermask for now */
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
 		struct ata_device *d = &ap->device[i];
-		if (!ata_dev_enabled(d))
+
+		if (ata_dev_absent(d))
 			continue;
-		xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask,
-					       d->udma_mask);
+
+		if (ata_dev_disabled(d)) {
+			/* to avoid violating device selection timing */
+			xfer_mask &= ata_pack_xfermask(d->pio_mask,
+						       UINT_MAX, UINT_MAX);
+			continue;
+		}
+
+		xfer_mask &= ata_pack_xfermask(d->pio_mask,
+					       d->mwdma_mask, d->udma_mask);
 		xfer_mask &= ata_id_xfermask(d->id);
 		if (ata_dma_blacklisted(d))
 			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
-		/* Apply cable rule here. Don't apply it early because when
-		   we handle hot plug the cable type can itself change */
-		if (ap->cbl == ATA_CBL_PATA40)
-			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 	}
 
 	if (ata_dma_blacklisted(dev))
@@ -2714,11 +2961,12 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev)
 		if (hs->simplex_claimed)
 			xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
 	}
+
 	if (ap->ops->mode_filter)
 		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
 
-	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
-			    &dev->udma_mask);
+	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
+			    &dev->mwdma_mask, &dev->udma_mask);
 }
 
 /**
@@ -2752,7 +3000,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap,
 	tf.protocol = ATA_PROT_NODATA;
 	tf.nsect = dev->xfer_mode;
 
-	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
+	err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
@@ -2792,7 +3040,7 @@ static unsigned int ata_dev_init_params(struct ata_port *ap,
 	tf.nsect = sectors;
 	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
 
-	err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
+	err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
 
 	DPRINTK("EXIT, err_mask=%x\n", err_mask);
 	return err_mask;
@@ -3838,8 +4086,8 @@ fsm_start:
 
 static void ata_pio_task(void *_data)
 {
-	struct ata_port *ap = _data;
-	struct ata_queued_cmd *qc;
+	struct ata_queued_cmd *qc = _data;
+	struct ata_port *ap = qc->ap;
 	u8 status;
 	int poll_next;
 
@@ -4392,7 +4640,7 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
@@ -4424,7 +4672,7 @@ static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev,
 	tf.flags |= ATA_TFLAG_DEVICE;
 	tf.protocol = ATA_PROT_NODATA;
 
-	err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0);
+	err = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0);
 	if (err)
 		printk(KERN_ERR "%s: ata command failed: %d\n",
 		       __FUNCTION__, err);
@@ -4613,7 +4861,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
 	host->unique_id = ata_unique_id++;
 	host->max_cmd_len = 12;
 
-	ap->flags = ATA_FLAG_PORT_DISABLED;
+	ap->flags = ATA_FLAG_DISABLED;
 	ap->id = host->unique_id;
 	ap->host = host;
 	ap->ctl = ATA_DEVCTL_OBS;
@@ -4628,6 +4876,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
 	ap->flags |= ent->host_flags;
 	ap->ops = ent->port_ops;
 	ap->cbl = ATA_CBL_NONE;
+	ap->sata_spd_limit = UINT_MAX;
 	ap->active_tag = ATA_TAG_POISON;
 	ap->last_ctl = 0xFF;
 
@@ -5083,7 +5332,6 @@ EXPORT_SYMBOL_GPL(ata_sg_init);
 EXPORT_SYMBOL_GPL(ata_sg_init_one);
 EXPORT_SYMBOL_GPL(__ata_qc_complete);
 EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
-EXPORT_SYMBOL_GPL(ata_eng_timeout);
 EXPORT_SYMBOL_GPL(ata_tf_load);
 EXPORT_SYMBOL_GPL(ata_tf_read);
 EXPORT_SYMBOL_GPL(ata_noop_dev_select);
@@ -5123,15 +5371,12 @@ EXPORT_SYMBOL_GPL(ata_busy_sleep);
 EXPORT_SYMBOL_GPL(ata_port_queue_task);
 EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
 EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
-EXPORT_SYMBOL_GPL(ata_scsi_error);
 EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
 EXPORT_SYMBOL_GPL(ata_scsi_release);
 EXPORT_SYMBOL_GPL(ata_host_intr);
 EXPORT_SYMBOL_GPL(ata_id_string);
 EXPORT_SYMBOL_GPL(ata_id_c_string);
 EXPORT_SYMBOL_GPL(ata_scsi_simulate);
-EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
-EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
 
 EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
 EXPORT_SYMBOL_GPL(ata_timing_compute);
@@ -5153,3 +5398,8 @@ EXPORT_SYMBOL_GPL(ata_device_suspend);
 EXPORT_SYMBOL_GPL(ata_device_resume);
 EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
 EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
+
+EXPORT_SYMBOL_GPL(ata_scsi_error);
+EXPORT_SYMBOL_GPL(ata_eng_timeout);
+EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
+EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
diff --git a/drivers/scsi/libata-eh.c b/drivers/scsi/libata-eh.c
new file mode 100644
index 000000000000..e73f5612aea8
--- /dev/null
+++ b/drivers/scsi/libata-eh.c
@@ -0,0 +1,264 @@
+/*
+ * libata-eh.c - libata error handling
+ *
+ * Maintained by: Jeff Garzik <jgarzik@pobox.com>
+ *                Please ALWAYS copy linux-ide@vger.kernel.org
+ *                on emails.
+ *
+ * Copyright 2006 Tejun Heo <htejun@gmail.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; see the file COPYING.  If not, write to
+ * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
+ * USA.
+ *
+ *
+ * libata documentation is available via 'make {ps|pdf}docs',
+ * as Documentation/DocBook/libata.*
+ *
+ * Hardware documentation available from http://www.t13.org/ and
+ * http://www.sata-io.org/
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#include <linux/libata.h>
+
+#include "libata.h"
+
+/**
+ * ata_scsi_timed_out - SCSI layer time out callback
+ * @cmd: timed out SCSI command
+ *
+ * Handles SCSI layer timeout. We race with normal completion of
+ * the qc for @cmd. If the qc is already gone, we lose and let
+ * the scsi command finish (EH_HANDLED). Otherwise, the qc has
+ * timed out and EH should be invoked. Prevent ata_qc_complete()
+ * from finishing it by setting EH_SCHEDULED and return
+ * EH_NOT_HANDLED.
+ *
+ * LOCKING:
+ * Called from timer context
+ *
+ * RETURNS:
+ * EH_HANDLED or EH_NOT_HANDLED
+ */
+enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
+{
+	struct Scsi_Host *host = cmd->device->host;
+	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
+	unsigned long flags;
+	struct ata_queued_cmd *qc;
+	enum scsi_eh_timer_return ret = EH_HANDLED;
+
+	DPRINTK("ENTER\n");
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	qc = ata_qc_from_tag(ap, ap->active_tag);
+	if (qc) {
+		WARN_ON(qc->scsicmd != cmd);
+		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
+		qc->err_mask |= AC_ERR_TIMEOUT;
+		ret = EH_NOT_HANDLED;
+	}
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	DPRINTK("EXIT, ret=%d\n", ret);
+	return ret;
+}
+
+/**
+ * ata_scsi_error - SCSI layer error handler callback
+ * @host: SCSI host on which error occurred
+ *
+ * Handles SCSI-layer-thrown error events.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ *
+ * RETURNS:
+ * Zero.
+ */
+int ata_scsi_error(struct Scsi_Host *host)
+{
+	struct ata_port *ap = (struct ata_port *)&host->hostdata[0];
+
+	DPRINTK("ENTER\n");
+
+	/* synchronize with IRQ handler and port task */
+	spin_unlock_wait(&ap->host_set->lock);
+	ata_port_flush_task(ap);
+
+	WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
+
+	ap->ops->eng_timeout(ap);
+
+	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
+
+	scsi_eh_flush_done_q(&ap->eh_done_q);
+
+	DPRINTK("EXIT\n");
+	return 0;
+}
+
+/**
+ * ata_qc_timeout - Handle timeout of queued command
+ * @qc: Command that timed out
+ *
+ * Some part of the kernel (currently, only the SCSI layer)
+ * has noticed that the active command on port @ap has not
+ * completed after a specified length of time. Handle this
+ * condition by disabling DMA (if necessary) and completing
+ * transactions, with error if necessary.
+ *
+ * This also handles the case of the "lost interrupt", where
+ * for some reason (possibly hardware bug, possibly driver bug)
+ * an interrupt was not delivered to the driver, even though the
+ * transaction completed successfully.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ */
+static void ata_qc_timeout(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct ata_host_set *host_set = ap->host_set;
+	u8 host_stat = 0, drv_stat;
+	unsigned long flags;
+
+	DPRINTK("ENTER\n");
+
+	ap->hsm_task_state = HSM_ST_IDLE;
+
+	spin_lock_irqsave(&host_set->lock, flags);
+
+	switch (qc->tf.protocol) {
+
+	case ATA_PROT_DMA:
+	case ATA_PROT_ATAPI_DMA:
+		host_stat = ap->ops->bmdma_status(ap);
+
+		/* before we do anything else, clear DMA-Start bit */
+		ap->ops->bmdma_stop(qc);
+
+		/* fall through */
+
+	default:
+		ata_altstatus(ap);
+		drv_stat = ata_chk_status(ap);
+
+		/* ack bmdma irq events */
+		ap->ops->irq_clear(ap);
+
+		printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
+		       ap->id, qc->tf.command, drv_stat, host_stat);
+
+		/* complete taskfile transaction */
+		qc->err_mask |= ac_err_mask(drv_stat);
+		break;
+	}
+
+	spin_unlock_irqrestore(&host_set->lock, flags);
+
+	ata_eh_qc_complete(qc);
+
+	DPRINTK("EXIT\n");
+}
+
+/**
+ * ata_eng_timeout - Handle timeout of queued command
+ * @ap: Port on which timed-out command is active
+ *
+ * Some part of the kernel (currently, only the SCSI layer)
+ * has noticed that the active command on port @ap has not
+ * completed after a specified length of time. Handle this
+ * condition by disabling DMA (if necessary) and completing
+ * transactions, with error if necessary.
+ *
+ * This also handles the case of the "lost interrupt", where
+ * for some reason (possibly hardware bug, possibly driver bug)
+ * an interrupt was not delivered to the driver, even though the
+ * transaction completed successfully.
+ *
+ * LOCKING:
+ * Inherited from SCSI layer (none, can sleep)
+ */
+void ata_eng_timeout(struct ata_port *ap)
+{
+	DPRINTK("ENTER\n");
+
+	ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag));
+
+	DPRINTK("EXIT\n");
+}
+
+static void ata_eh_scsidone(struct scsi_cmnd *scmd)
+{
+	/* nada */
+}
+
+static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct ata_port *ap = qc->ap;
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ap->host_set->lock, flags);
+	qc->scsidone = ata_eh_scsidone;
+	__ata_qc_complete(qc);
+	WARN_ON(ata_tag_valid(qc->tag));
+	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+
+	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
+}
+
+/**
+ * ata_eh_qc_complete - Complete an active ATA command from EH
+ * @qc: Command to complete
+ *
+ * Indicate to the mid and upper layers that an ATA command has
+ * completed. To be used from EH.
+ */
+void ata_eh_qc_complete(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	scmd->retries = scmd->allowed;
+	__ata_eh_qc_complete(qc);
+}
+
+/**
+ * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
+ * @qc: Command to retry
+ *
+ * Indicate to the mid and upper layers that an ATA command
+ * should be retried. To be used from EH.
+ *
+ * SCSI midlayer limits the number of retries to scmd->allowed.
+ * scmd->retries is decremented for commands which get retried
+ * due to unrelated failures (qc->err_mask is zero).
+ */
+void ata_eh_qc_retry(struct ata_queued_cmd *qc)
+{
+	struct scsi_cmnd *scmd = qc->scsicmd;
+	if (!qc->err_mask && scmd->retries)
+		scmd->retries--;
+	__ata_eh_qc_complete(qc);
+}
diff --git a/drivers/scsi/libata-scsi.c b/drivers/scsi/libata-scsi.c
index c1a4b29a9ae1..745fc263feeb 100644
--- a/drivers/scsi/libata-scsi.c
+++ b/drivers/scsi/libata-scsi.c
@@ -53,7 +53,6 @@
 typedef unsigned int (*ata_xlat_func_t)(struct ata_queued_cmd *qc, const u8 *scsicmd);
 static struct ata_device *
 ata_scsi_find_dev(struct ata_port *ap, const struct scsi_device *scsidev);
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
 
 #define RW_RECOVERY_MPAGE 0x1
 #define RW_RECOVERY_MPAGE_LEN 12
@@ -546,16 +545,11 @@ void ata_gen_ata_desc_sense(struct ata_queued_cmd *qc)
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
 	/*
-	 * Read the controller registers.
-	 */
-	WARN_ON(qc->ap->ops->tf_read == NULL);
-	qc->ap->ops->tf_read(qc->ap, tf);
-
-	/*
 	 * Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
-	if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+	if (qc->err_mask ||
+	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
 				   &sb[1], &sb[2], &sb[3]);
 		sb[1] &= 0x0f;
@@ -621,16 +615,11 @@ void ata_gen_fixed_sense(struct ata_queued_cmd *qc)
 	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
 
 	/*
-	 * Read the controller registers.
-	 */
-	WARN_ON(qc->ap->ops->tf_read == NULL);
-	qc->ap->ops->tf_read(qc->ap, tf);
-
-	/*
 	 * Use ata_to_sense_error() to map status register bits
 	 * onto sense key, asc & ascq.
 	 */
-	if (tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
+	if (qc->err_mask ||
+	    tf->command & (ATA_BUSY | ATA_DF | ATA_ERR | ATA_DRQ)) {
 		ata_to_sense_error(qc->ap->id, tf->command, tf->feature,
 				   &sb[2], &sb[12], &sb[13]);
 		sb[2] &= 0x0f;
@@ -724,141 +713,6 @@ int ata_scsi_slave_config(struct scsi_device *sdev)
 }
 
 /**
- * ata_scsi_timed_out - SCSI layer time out callback
- * @cmd: timed out SCSI command
- *
- * Handles SCSI layer timeout. We race with normal completion of
- * the qc for @cmd. If the qc is already gone, we lose and let
- * the scsi command finish (EH_HANDLED). Otherwise, the qc has
- * timed out and EH should be invoked. Prevent ata_qc_complete()
- * from finishing it by setting EH_SCHEDULED and return
- * EH_NOT_HANDLED.
- *
- * LOCKING:
- * Called from timer context
- *
- * RETURNS:
- * EH_HANDLED or EH_NOT_HANDLED
- */
-enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
-{
-	struct Scsi_Host *host = cmd->device->host;
-	struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
-	unsigned long flags;
-	struct ata_queued_cmd *qc;
-	enum scsi_eh_timer_return ret = EH_HANDLED;
-
-	DPRINTK("ENTER\n");
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	qc = ata_qc_from_tag(ap, ap->active_tag);
-	if (qc) {
-		WARN_ON(qc->scsicmd != cmd);
-		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
-		qc->err_mask |= AC_ERR_TIMEOUT;
-		ret = EH_NOT_HANDLED;
-	}
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	DPRINTK("EXIT, ret=%d\n", ret);
-	return ret;
-}
-
-/**
- * ata_scsi_error - SCSI layer error handler callback
- * @host: SCSI host on which error occurred
- *
- * Handles SCSI-layer-thrown error events.
- *
- * LOCKING:
- * Inherited from SCSI layer (none, can sleep)
- *
- * RETURNS:
- * Zero.
- */
-
-int ata_scsi_error(struct Scsi_Host *host)
-{
-	struct ata_port *ap;
-	unsigned long flags;
-
-	DPRINTK("ENTER\n");
-
-	ap = (struct ata_port *) &host->hostdata[0];
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	WARN_ON(ap->flags & ATA_FLAG_IN_EH);
-	ap->flags |= ATA_FLAG_IN_EH;
-	WARN_ON(ata_qc_from_tag(ap, ap->active_tag) == NULL);
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	ata_port_flush_task(ap);
-
-	ap->ops->eng_timeout(ap);
-
-	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));
-
-	scsi_eh_flush_done_q(&ap->eh_done_q);
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	ap->flags &= ~ATA_FLAG_IN_EH;
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	DPRINTK("EXIT\n");
-	return 0;
-}
-
-static void ata_eh_scsidone(struct scsi_cmnd *scmd)
-{
-	/* nada */
-}
-
-static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
-{
-	struct ata_port *ap = qc->ap;
-	struct scsi_cmnd *scmd = qc->scsicmd;
-	unsigned long flags;
-
-	spin_lock_irqsave(&ap->host_set->lock, flags);
-	qc->scsidone = ata_eh_scsidone;
-	__ata_qc_complete(qc);
-	WARN_ON(ata_tag_valid(qc->tag));
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
-
-	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
-}
-
-/**
- * ata_eh_qc_complete - Complete an active ATA command from EH
- * @qc: Command to complete
- *
- * Indicate to the mid and upper layers that an ATA command has
- * completed. To be used from EH.
- */
-void ata_eh_qc_complete(struct ata_queued_cmd *qc)
-{
-	struct scsi_cmnd *scmd = qc->scsicmd;
-	scmd->retries = scmd->allowed;
-	__ata_eh_qc_complete(qc);
-}
-
-/**
- * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
- * @qc: Command to retry
- *
- * Indicate to the mid and upper layers that an ATA command
- * should be retried. To be used from EH.
- *
- * SCSI midlayer limits the number of retries to scmd->allowed.
- * This function might need to adjust scmd->retries for commands
- * which get retried due to unrelated NCQ failures.
- */
-void ata_eh_qc_retry(struct ata_queued_cmd *qc)
-{
-	__ata_eh_qc_complete(qc);
-}
-
-/**
  * ata_scsi_start_stop_xlat - Translate SCSI START STOP UNIT command
  * @qc: Storage for translated ATA taskfile
  * @scsicmd: SCSI command to translate
@@ -1197,6 +1051,7 @@ static unsigned int ata_scsi_rw_xlat(struct ata_queued_cmd *qc, const u8 *scsicm
 	u64 block;
 	u32 n_block;
 
+	qc->flags |= ATA_QCFLAG_IO;
 	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 
 	if (scsicmd[0] == WRITE_10 || scsicmd[0] == WRITE_6 ||
@@ -1343,11 +1198,14 @@ static void ata_scsi_qc_complete(struct ata_queued_cmd *qc)
 	 */
 	if (((cdb[0] == ATA_16) || (cdb[0] == ATA_12)) &&
 	    ((cdb[2] & 0x20) || need_sense)) {
+		qc->ap->ops->tf_read(qc->ap, &qc->tf);
 		ata_gen_ata_desc_sense(qc);
 	} else {
 		if (!need_sense) {
 			cmd->result = SAM_STAT_GOOD;
 		} else {
+			qc->ap->ops->tf_read(qc->ap, &qc->tf);
+
 			/* TODO: decide which descriptor format to use
 			 * for 48b LBA devices and call that here
 			 * instead of the fixed desc, which is only
@@ -2139,13 +1997,15 @@ void ata_scsi_badcmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *), u8
 
 static void atapi_sense_complete(struct ata_queued_cmd *qc)
 {
-	if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0))
+	if (qc->err_mask && ((qc->err_mask & AC_ERR_DEV) == 0)) {
 		/* FIXME: not quite right; we don't want the
 		 * translation of taskfile registers into
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
+		qc->ap->ops->tf_read(qc->ap, &qc->tf);
 		ata_gen_ata_desc_sense(qc);
+	}
 
 	qc->scsidone(qc->scsicmd);
 	ata_qc_free(qc);
@@ -2213,17 +2073,15 @@ static void atapi_qc_complete(struct ata_queued_cmd *qc)
 		cmd->result = SAM_STAT_CHECK_CONDITION;
 		atapi_request_sense(qc);
 		return;
-	}
-
-	else if (unlikely(err_mask))
+	} else if (unlikely(err_mask)) {
 		/* FIXME: not quite right; we don't want the
 		 * translation of taskfile registers into
 		 * a sense descriptors, since that's only
 		 * correct for ATA, not ATAPI
 		 */
+		qc->ap->ops->tf_read(qc->ap, &qc->tf);
 		ata_gen_ata_desc_sense(qc);
-
-	else {
+	} else {
 		u8 *scsicmd = cmd->cmnd;
 
 		if ((scsicmd[0] == INQUIRY) && ((scsicmd[1] & 0x03) == 0)) {
@@ -2737,7 +2595,7 @@ void ata_scsi_scan_host(struct ata_port *ap)
 	struct ata_device *dev;
 	unsigned int i;
 
-	if (ap->flags & ATA_FLAG_PORT_DISABLED)
+	if (ap->flags & ATA_FLAG_DISABLED)
 		return;
 
 	for (i = 0; i < ATA_MAX_DEVICES; i++) {
diff --git a/drivers/scsi/libata.h b/drivers/scsi/libata.h
index 1c755b14521a..31efc2e60b69 100644
--- a/drivers/scsi/libata.h
+++ b/drivers/scsi/libata.h
@@ -45,7 +45,20 @@ extern int libata_fua;
 extern struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
 					      struct ata_device *dev);
 extern int ata_rwcmd_protocol(struct ata_queued_cmd *qc);
+extern void ata_dev_disable(struct ata_port *ap, struct ata_device *dev);
 extern void ata_port_flush_task(struct ata_port *ap);
+extern unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev,
+				  struct ata_taskfile *tf, const u8 *cdb,
+				  int dma_dir, void *buf, unsigned int buflen);
+extern int ata_down_sata_spd_limit(struct ata_port *ap);
+extern int ata_set_sata_spd_needed(struct ata_port *ap);
+extern int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev,
+				   int force_pio0);
+extern int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev);
+extern int ata_do_reset(struct ata_port *ap,
+			ata_reset_fn_t reset,
+			ata_postreset_fn_t postreset,
+			int verbose, unsigned int *classes);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern void ata_qc_issue(struct ata_queued_cmd *qc);
 extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
@@ -60,7 +73,6 @@ extern int ata_cmd_ioctl(struct scsi_device *scsidev, void __user *arg);
 extern struct scsi_transport_template ata_scsi_transport_template;
 
 extern void ata_scsi_scan_host(struct ata_port *ap);
-extern int ata_scsi_error(struct Scsi_Host *host);
 extern unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf,
 				       unsigned int buflen);
 
@@ -90,4 +102,7 @@ extern void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
 			       unsigned int (*actor) (struct ata_scsi_args *args,
 						      u8 *rbuf, unsigned int buflen));
 
+/* libata-eh.c */
+extern enum scsi_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd);
+
 #endif /* __LIBATA_H__ */
diff --git a/drivers/scsi/pdc_adma.c b/drivers/scsi/pdc_adma.c
index ff4fec96271b..a4dcb4352206 100644
--- a/drivers/scsi/pdc_adma.c
+++ b/drivers/scsi/pdc_adma.c
@@ -456,7 +456,7 @@ static inline unsigned int adma_intr_pkt(struct ata_host_set *host_set)
 			continue;
 		handled = 1;
 		adma_enter_reg_mode(ap);
-		if (ap->flags & ATA_FLAG_PORT_DISABLED)
+		if (ap->flags & ATA_FLAG_DISABLED)
 			continue;
 		pp = ap->private_data;
 		if (!pp || pp->state != adma_state_pkt)
@@ -481,7 +481,7 @@ static inline unsigned int adma_intr_mmio(struct ata_host_set *host_set)
 	for (port_no = 0; port_no < host_set->n_ports; ++port_no) {
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
-		if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
+		if (ap && (!(ap->flags & ATA_FLAG_DISABLED))) {
 			struct ata_queued_cmd *qc;
 			struct adma_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != adma_state_mmio)
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index dc54f294fac8..fd9f2173f062 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -1397,7 +1397,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 			}
 		}
 
-		if (ap && (ap->flags & ATA_FLAG_PORT_DISABLED))
+		if (ap && (ap->flags & ATA_FLAG_DISABLED))
 			continue;
 
 		err_mask = ac_err_mask(ata_status);
diff --git a/drivers/scsi/sata_nv.c b/drivers/scsi/sata_nv.c
index 8a99c3827426..be38f328a479 100644
--- a/drivers/scsi/sata_nv.c
+++ b/drivers/scsi/sata_nv.c
@@ -280,7 +280,7 @@ static irqreturn_t nv_interrupt (int irq, void *dev_instance,
 
 		ap = host_set->ports[i];
 		if (ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_promise.c b/drivers/scsi/sata_promise.c
index cf0eeba47eb6..322525d84907 100644
--- a/drivers/scsi/sata_promise.c
+++ b/drivers/scsi/sata_promise.c
@@ -535,7 +535,7 @@ static irqreturn_t pdc_interrupt (int irq, void *dev_instance, struct pt_regs *r
 		ap = host_set->ports[i];
 		tmp = mask & (1 << (i + 1));
 		if (tmp && ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 
 			qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_qstor.c b/drivers/scsi/sata_qstor.c
index a547c1272a5e..f86858962fbe 100644
--- a/drivers/scsi/sata_qstor.c
+++ b/drivers/scsi/sata_qstor.c
@@ -395,7 +395,7 @@ static inline unsigned int qs_intr_pkt(struct ata_host_set *host_set)
 		DPRINTK("SFF=%08x%08x: sCHAN=%u sHST=%d sDST=%02x\n",
 			sff1, sff0, port_no, sHST, sDST);
 		handled = 1;
-		if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 			struct qs_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != qs_state_pkt)
@@ -428,7 +428,7 @@ static inline unsigned int qs_intr_mmio(struct ata_host_set *host_set)
 		struct ata_port *ap;
 		ap = host_set->ports[port_no];
 		if (ap &&
-		    !(ap->flags & ATA_FLAG_PORT_DISABLED)) {
+		    !(ap->flags & ATA_FLAG_DISABLED)) {
 			struct ata_queued_cmd *qc;
 			struct qs_port_priv *pp = ap->private_data;
 			if (!pp || pp->state != qs_state_mmio)
diff --git a/drivers/scsi/sata_sil24.c b/drivers/scsi/sata_sil24.c
index 068c98a4111b..c34f6dabf418 100644
--- a/drivers/scsi/sata_sil24.c
+++ b/drivers/scsi/sata_sil24.c
@@ -770,7 +770,7 @@ static irqreturn_t sil24_interrupt(int irq, void *dev_instance, struct pt_regs *
770 for (i = 0; i < host_set->n_ports; i++) 770 for (i = 0; i < host_set->n_ports; i++)
771 if (status & (1 << i)) { 771 if (status & (1 << i)) {
772 struct ata_port *ap = host_set->ports[i]; 772 struct ata_port *ap = host_set->ports[i];
773 if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) { 773 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
774 sil24_host_intr(host_set->ports[i]); 774 sil24_host_intr(host_set->ports[i]);
775 handled++; 775 handled++;
776 } else 776 } else
diff --git a/drivers/scsi/sata_sx4.c b/drivers/scsi/sata_sx4.c
index 2d6b091a9eaf..45c78c399bfd 100644
--- a/drivers/scsi/sata_sx4.c
+++ b/drivers/scsi/sata_sx4.c
@@ -834,7 +834,7 @@ static irqreturn_t pdc20621_interrupt (int irq, void *dev_instance, struct pt_re
834 tmp = mask & (1 << i); 834 tmp = mask & (1 << i);
835 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp); 835 VPRINTK("seq %u, port_no %u, ap %p, tmp %x\n", i, port_no, ap, tmp);
836 if (tmp && ap && 836 if (tmp && ap &&
837 !(ap->flags & ATA_FLAG_PORT_DISABLED)) { 837 !(ap->flags & ATA_FLAG_DISABLED)) {
838 struct ata_queued_cmd *qc; 838 struct ata_queued_cmd *qc;
839 839
840 qc = ata_qc_from_tag(ap, ap->active_tag); 840 qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/drivers/scsi/sata_vsc.c b/drivers/scsi/sata_vsc.c
index 5af6d5f9f4bd..b7d6a31628c2 100644
--- a/drivers/scsi/sata_vsc.c
+++ b/drivers/scsi/sata_vsc.c
@@ -229,7 +229,7 @@ static irqreturn_t vsc_sata_interrupt (int irq, void *dev_instance,
229 handled++; 229 handled++;
230 } 230 }
231 231
232 if (ap && !(ap->flags & ATA_FLAG_PORT_DISABLED)) { 232 if (ap && !(ap->flags & ATA_FLAG_DISABLED)) {
233 struct ata_queued_cmd *qc; 233 struct ata_queued_cmd *qc;
234 234
235 qc = ata_qc_from_tag(ap, ap->active_tag); 235 qc = ata_qc_from_tag(ap, ap->active_tag);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index b0171e9accc4..cc6cc08e8010 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -120,10 +120,12 @@ enum {
120 ATA_SHT_USE_CLUSTERING = 1, 120 ATA_SHT_USE_CLUSTERING = 1,
121 121
122 /* struct ata_device stuff */ 122 /* struct ata_device stuff */
123 ATA_DFLAG_LBA48 = (1 << 0), /* device supports LBA48 */ 123 ATA_DFLAG_LBA = (1 << 0), /* device supports LBA */
124 ATA_DFLAG_PIO = (1 << 1), /* device currently in PIO mode */ 124 ATA_DFLAG_LBA48 = (1 << 1), /* device supports LBA48 */
125 ATA_DFLAG_LBA = (1 << 2), /* device supports LBA */ 125 ATA_DFLAG_CDB_INTR = (1 << 2), /* device asserts INTRQ when ready for CDB */
126 ATA_DFLAG_CDB_INTR = (1 << 3), /* device asserts INTRQ when ready for CDB */ 126 ATA_DFLAG_CFG_MASK = (1 << 8) - 1,
127
128 ATA_DFLAG_PIO = (1 << 8), /* device currently in PIO mode */
127 129
128 ATA_DEV_UNKNOWN = 0, /* unknown device */ 130 ATA_DEV_UNKNOWN = 0, /* unknown device */
129 ATA_DEV_ATA = 1, /* ATA device */ 131 ATA_DEV_ATA = 1, /* ATA device */
@@ -133,33 +135,35 @@ enum {
133 ATA_DEV_NONE = 5, /* no device */ 135 ATA_DEV_NONE = 5, /* no device */
134 136
135 /* struct ata_port flags */ 137 /* struct ata_port flags */
136 ATA_FLAG_SLAVE_POSS = (1 << 1), /* host supports slave dev */ 138 ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
137 /* (doesn't imply presence) */ 139 /* (doesn't imply presence) */
138 ATA_FLAG_PORT_DISABLED = (1 << 2), /* port is disabled, ignore it */ 140 ATA_FLAG_SATA = (1 << 1),
139 ATA_FLAG_SATA = (1 << 3), 141 ATA_FLAG_NO_LEGACY = (1 << 2), /* no legacy mode check */
140 ATA_FLAG_NO_LEGACY = (1 << 4), /* no legacy mode check */ 142 ATA_FLAG_MMIO = (1 << 3), /* use MMIO, not PIO */
141 ATA_FLAG_SRST = (1 << 5), /* (obsolete) use ATA SRST, not E.D.D. */ 143 ATA_FLAG_SRST = (1 << 4), /* (obsolete) use ATA SRST, not E.D.D. */
142 ATA_FLAG_MMIO = (1 << 6), /* use MMIO, not PIO */ 144 ATA_FLAG_SATA_RESET = (1 << 5), /* (obsolete) use COMRESET */
143 ATA_FLAG_SATA_RESET = (1 << 7), /* (obsolete) use COMRESET */ 145 ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
144 ATA_FLAG_PIO_DMA = (1 << 8), /* PIO cmds via DMA */ 146 ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
145 ATA_FLAG_PIO_POLLING = (1 << 9), /* use polling PIO if LLD 147 ATA_FLAG_PIO_LBA48 = (1 << 8), /* Host DMA engine is LBA28 only */
146 * doesn't handle PIO interrupts */ 148 ATA_FLAG_IRQ_MASK = (1 << 9), /* Mask IRQ in PIO xfers */
147 ATA_FLAG_DEBUGMSG = (1 << 10), 149 ATA_FLAG_PIO_POLLING = (1 << 10), /* use polling PIO if LLD
148 ATA_FLAG_NO_ATAPI = (1 << 11), /* No ATAPI support */ 150 * doesn't handle PIO interrupts */
149 151
150 ATA_FLAG_SUSPENDED = (1 << 12), /* port is suspended */ 152 ATA_FLAG_DEBUGMSG = (1 << 17),
151 153 ATA_FLAG_FLUSH_PORT_TASK = (1 << 18), /* flush port task */
152 ATA_FLAG_PIO_LBA48 = (1 << 13), /* Host DMA engine is LBA28 only */ 154
153 ATA_FLAG_IRQ_MASK = (1 << 14), /* Mask IRQ in PIO xfers */ 155 ATA_FLAG_DISABLED = (1 << 19), /* port is disabled, ignore it */
154 156 ATA_FLAG_SUSPENDED = (1 << 20), /* port is suspended */
155 ATA_FLAG_FLUSH_PORT_TASK = (1 << 15), /* Flush port task */ 157
156 ATA_FLAG_IN_EH = (1 << 16), /* EH in progress */ 158 /* bits 24:31 of ap->flags are reserved for LLDD specific flags */
157 159
 158 ATA_QCFLAG_ACTIVE = (1 << 1), /* cmd not yet ack'd to scsi layer */ 160 /* struct ata_queued_cmd flags */
 159 ATA_QCFLAG_SG = (1 << 3), /* have s/g table? */ 161 ATA_QCFLAG_ACTIVE = (1 << 0), /* cmd not yet ack'd to scsi layer */
160 ATA_QCFLAG_SINGLE = (1 << 4), /* no s/g, just a single buffer */ 162 ATA_QCFLAG_SG = (1 << 1), /* have s/g table? */
163 ATA_QCFLAG_SINGLE = (1 << 2), /* no s/g, just a single buffer */
161 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE, 164 ATA_QCFLAG_DMAMAP = ATA_QCFLAG_SG | ATA_QCFLAG_SINGLE,
162 ATA_QCFLAG_EH_SCHEDULED = (1 << 5), /* EH scheduled */ 165 ATA_QCFLAG_IO = (1 << 3), /* standard IO command */
166 ATA_QCFLAG_EH_SCHEDULED = (1 << 4), /* EH scheduled */
163 167
164 /* host set flags */ 168 /* host set flags */
165 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */ 169 ATA_HOST_SIMPLEX = (1 << 0), /* Host is simplex, one DMA channel per host_set only */
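The renumbered port flags also reserve bits 24:31 of ap->flags for LLDD-specific use. A sketch of how a low-level driver might claim one of those bits; the flag name and helper are made up for illustration, only ATA_FLAG_DISABLED comes from the patch:

	/* Hypothetical LLDD-private port flag, kept inside the reserved 24:31 range. */
	enum {
		EXAMPLE_PFLAG_FROZEN = (1 << 24),	/* driver-local state, opaque to the core */
	};

	static int example_port_is_usable(struct ata_port *ap)
	{
		if (ap->flags & ATA_FLAG_DISABLED)	/* core-owned bit */
			return 0;
		return !(ap->flags & EXAMPLE_PFLAG_FROZEN);
	}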
@@ -205,10 +209,13 @@ enum {
205 /* size of buffer to pad xfers ending on unaligned boundaries */ 209 /* size of buffer to pad xfers ending on unaligned boundaries */
206 ATA_DMA_PAD_SZ = 4, 210 ATA_DMA_PAD_SZ = 4,
207 ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE, 211 ATA_DMA_PAD_BUF_SZ = ATA_DMA_PAD_SZ * ATA_MAX_QUEUE,
208 212
209 /* Masks for port functions */ 213 /* masks for port functions */
210 ATA_PORT_PRIMARY = (1 << 0), 214 ATA_PORT_PRIMARY = (1 << 0),
211 ATA_PORT_SECONDARY = (1 << 1), 215 ATA_PORT_SECONDARY = (1 << 1),
216
217 /* how hard are we gonna try to probe/recover devices */
218 ATA_PROBE_MAX_TRIES = 3,
212}; 219};
213 220
214enum hsm_task_states { 221enum hsm_task_states {
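The new ATA_PROBE_MAX_TRIES constant bounds how often probing or recovery is reattempted. A minimal sketch of the retry shape the constant implies; example_probe_device() is a placeholder, not the libata-core routine:

	/* Sketch only: bounded retry loop around a probe/revalidate step. */
	static int example_probe_with_retries(struct ata_port *ap, struct ata_device *dev)
	{
		int tries = ATA_PROBE_MAX_TRIES;
		int rc;

		do {
			rc = example_probe_device(ap, dev);	/* hypothetical probe step */
		} while (rc && --tries);

		return rc;	/* non-zero: caller gives up on the device */
	}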
@@ -394,6 +401,7 @@ struct ata_port {
394 unsigned int mwdma_mask; 401 unsigned int mwdma_mask;
395 unsigned int udma_mask; 402 unsigned int udma_mask;
396 unsigned int cbl; /* cable type; ATA_CBL_xxx */ 403 unsigned int cbl; /* cable type; ATA_CBL_xxx */
404 unsigned int sata_spd_limit; /* SATA PHY speed limit */
397 405
398 struct ata_device device[ATA_MAX_DEVICES]; 406 struct ata_device device[ATA_MAX_DEVICES];
399 407
@@ -519,9 +527,6 @@ extern void ata_host_set_remove(struct ata_host_set *host_set);
519extern int ata_scsi_detect(struct scsi_host_template *sht); 527extern int ata_scsi_detect(struct scsi_host_template *sht);
520extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); 528extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
521extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); 529extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
522extern int ata_scsi_error(struct Scsi_Host *host);
523extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
524extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
525extern int ata_scsi_release(struct Scsi_Host *host); 530extern int ata_scsi_release(struct Scsi_Host *host);
526extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc); 531extern unsigned int ata_host_intr(struct ata_port *ap, struct ata_queued_cmd *qc);
527extern int ata_scsi_device_resume(struct scsi_device *); 532extern int ata_scsi_device_resume(struct scsi_device *);
@@ -570,7 +575,6 @@ extern void ata_bmdma_stop(struct ata_queued_cmd *qc);
570extern u8 ata_bmdma_status(struct ata_port *ap); 575extern u8 ata_bmdma_status(struct ata_port *ap);
571extern void ata_bmdma_irq_clear(struct ata_port *ap); 576extern void ata_bmdma_irq_clear(struct ata_port *ap);
572extern void __ata_qc_complete(struct ata_queued_cmd *qc); 577extern void __ata_qc_complete(struct ata_queued_cmd *qc);
573extern void ata_eng_timeout(struct ata_port *ap);
574extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev, 578extern void ata_scsi_simulate(struct ata_port *ap, struct ata_device *dev,
575 struct scsi_cmnd *cmd, 579 struct scsi_cmnd *cmd,
576 void (*done)(struct scsi_cmnd *)); 580 void (*done)(struct scsi_cmnd *));
@@ -625,6 +629,14 @@ extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bit
625extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long); 629extern unsigned long ata_pci_default_filter(const struct ata_port *, struct ata_device *, unsigned long);
626#endif /* CONFIG_PCI */ 630#endif /* CONFIG_PCI */
627 631
632/*
633 * EH
634 */
635extern int ata_scsi_error(struct Scsi_Host *host);
636extern void ata_eng_timeout(struct ata_port *ap);
637extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
638extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
639
628 640
629static inline int 641static inline int
630ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc) 642ata_sg_is_last(struct scatterlist *sg, struct ata_queued_cmd *qc)
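The error-handling entry points are now grouped under a single EH heading. A hedged sketch of the decision point an LLD's EH path faces with the two qc helpers declared above; the function name and the retryable argument are illustrative, and the error analysis that drives the choice is omitted:

	/* Sketch: either finish the failed qc or send it back to the midlayer for a retry. */
	static void example_eh_finish(struct ata_queued_cmd *qc, int retryable)
	{
		if (retryable)
			ata_eh_qc_retry(qc);		/* requeue the SCSI command */
		else
			ata_eh_qc_complete(qc);		/* complete it with the status qc carries */
	}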
@@ -678,6 +690,11 @@ static inline unsigned int ata_class_disabled(unsigned int class)
678 return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP; 690 return class == ATA_DEV_ATA_UNSUP || class == ATA_DEV_ATAPI_UNSUP;
679} 691}
680 692
693static inline unsigned int ata_class_absent(unsigned int class)
694{
695 return !ata_class_enabled(class) && !ata_class_disabled(class);
696}
697
681static inline unsigned int ata_dev_enabled(const struct ata_device *dev) 698static inline unsigned int ata_dev_enabled(const struct ata_device *dev)
682{ 699{
683 return ata_class_enabled(dev->class); 700 return ata_class_enabled(dev->class);
@@ -688,6 +705,11 @@ static inline unsigned int ata_dev_disabled(const struct ata_device *dev)
688 return ata_class_disabled(dev->class); 705 return ata_class_disabled(dev->class);
689} 706}
690 707
708static inline unsigned int ata_dev_absent(const struct ata_device *dev)
709{
710 return ata_class_absent(dev->class);
711}
712
691static inline u8 ata_chk_status(struct ata_port *ap) 713static inline u8 ata_chk_status(struct ata_port *ap)
692{ 714{
693 return ap->ops->check_status(ap); 715 return ap->ops->check_status(ap);
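With ata_class_absent() and ata_dev_absent() added, a device slot now falls into one of three buckets: enabled, disabled, or absent. A small sketch of a scan loop that treats them differently, using the helpers defined in the hunks above; example_configure_device() is hypothetical:

	/* Illustrative only: classify each device slot with the new helpers. */
	static void example_scan_port(struct ata_port *ap)
	{
		unsigned int i;

		for (i = 0; i < ATA_MAX_DEVICES; i++) {
			struct ata_device *dev = &ap->device[i];

			if (ata_dev_absent(dev))
				continue;	/* nothing attached in this slot */
			if (ata_dev_disabled(dev))
				continue;	/* attached but taken offline */

			example_configure_device(ap, dev);	/* hypothetical per-device setup */
		}
	}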