diff options
author | Jeff Garzik <jeff@garzik.org> | 2006-04-12 16:54:16 -0400 |
---|---|---|
committer | Jeff Garzik <jeff@garzik.org> | 2006-04-12 16:54:16 -0400 |
commit | 875999c5539999f61a45620aae0c3e5fb1d2b035 (patch) | |
tree | 4535032a8a10f5782c0aef6a620b1a624ea9f863 /drivers/scsi/libata-core.c | |
parent | 79072f38909e3d9883317238887460c39ddcc4cb (diff) | |
parent | 26ec634c31a11a003040e10b4d650495158632fd (diff) |
Merge branch 'upstream'
Diffstat (limited to 'drivers/scsi/libata-core.c')
-rw-r--r-- | drivers/scsi/libata-core.c | 917 |
1 file changed, 556 insertions, 361 deletions
diff --git a/drivers/scsi/libata-core.c b/drivers/scsi/libata-core.c index 21b0ed583b8a..509178c3700c 100644 --- a/drivers/scsi/libata-core.c +++ b/drivers/scsi/libata-core.c | |||
@@ -65,7 +65,6 @@ static unsigned int ata_dev_init_params(struct ata_port *ap, | |||
65 | struct ata_device *dev, | 65 | struct ata_device *dev, |
66 | u16 heads, | 66 | u16 heads, |
67 | u16 sectors); | 67 | u16 sectors); |
68 | static void ata_set_mode(struct ata_port *ap); | ||
69 | static unsigned int ata_dev_set_xfermode(struct ata_port *ap, | 68 | static unsigned int ata_dev_set_xfermode(struct ata_port *ap, |
70 | struct ata_device *dev); | 69 | struct ata_device *dev); |
71 | static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev); | 70 | static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev); |
@@ -77,6 +76,10 @@ int atapi_enabled = 1; | |||
77 | module_param(atapi_enabled, int, 0444); | 76 | module_param(atapi_enabled, int, 0444); |
78 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); | 77 | MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)"); |
79 | 78 | ||
79 | int atapi_dmadir = 0; | ||
80 | module_param(atapi_dmadir, int, 0444); | ||
81 | MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)"); | ||
82 | |||
80 | int libata_fua = 0; | 83 | int libata_fua = 0; |
81 | module_param_named(fua, libata_fua, int, 0444); | 84 | module_param_named(fua, libata_fua, int, 0444); |
82 | MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); | 85 | MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)"); |
@@ -278,7 +281,7 @@ static void ata_unpack_xfermask(unsigned int xfer_mask, | |||
278 | } | 281 | } |
279 | 282 | ||
280 | static const struct ata_xfer_ent { | 283 | static const struct ata_xfer_ent { |
281 | unsigned int shift, bits; | 284 | int shift, bits; |
282 | u8 base; | 285 | u8 base; |
283 | } ata_xfer_tbl[] = { | 286 | } ata_xfer_tbl[] = { |
284 | { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 }, | 287 | { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 }, |
@@ -397,9 +400,21 @@ static const char *ata_mode_string(unsigned int xfer_mask) | |||
397 | return "<n/a>"; | 400 | return "<n/a>"; |
398 | } | 401 | } |
399 | 402 | ||
400 | static void ata_dev_disable(struct ata_port *ap, struct ata_device *dev) | 403 | static const char *sata_spd_string(unsigned int spd) |
401 | { | 404 | { |
402 | if (ata_dev_present(dev)) { | 405 | static const char * const spd_str[] = { |
406 | "1.5 Gbps", | ||
407 | "3.0 Gbps", | ||
408 | }; | ||
409 | |||
410 | if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str)) | ||
411 | return "<unknown>"; | ||
412 | return spd_str[spd - 1]; | ||
413 | } | ||
414 | |||
415 | void ata_dev_disable(struct ata_port *ap, struct ata_device *dev) | ||
416 | { | ||
417 | if (ata_dev_enabled(dev)) { | ||
403 | printk(KERN_WARNING "ata%u: dev %u disabled\n", | 418 | printk(KERN_WARNING "ata%u: dev %u disabled\n", |
404 | ap->id, dev->devno); | 419 | ap->id, dev->devno); |
405 | dev->class++; | 420 | dev->class++; |
@@ -949,6 +964,7 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc) | |||
949 | * @ap: Port to which the command is sent | 964 | * @ap: Port to which the command is sent |
950 | * @dev: Device to which the command is sent | 965 | * @dev: Device to which the command is sent |
951 | * @tf: Taskfile registers for the command and the result | 966 | * @tf: Taskfile registers for the command and the result |
967 | * @cdb: CDB for packet command | ||
952 | * @dma_dir: Data transfer direction of the command | 968 | * @dma_dir: Data transfer direction of the command |
953 | * @buf: Data buffer of the command | 969 | * @buf: Data buffer of the command |
954 | * @buflen: Length of data buffer | 970 | * @buflen: Length of data buffer |
@@ -963,10 +979,9 @@ void ata_qc_complete_internal(struct ata_queued_cmd *qc) | |||
963 | * None. Should be called with kernel context, might sleep. | 979 | * None. Should be called with kernel context, might sleep. |
964 | */ | 980 | */ |
965 | 981 | ||
966 | static unsigned | 982 | unsigned ata_exec_internal(struct ata_port *ap, struct ata_device *dev, |
967 | ata_exec_internal(struct ata_port *ap, struct ata_device *dev, | 983 | struct ata_taskfile *tf, const u8 *cdb, |
968 | struct ata_taskfile *tf, | 984 | int dma_dir, void *buf, unsigned int buflen) |
969 | int dma_dir, void *buf, unsigned int buflen) | ||
970 | { | 985 | { |
971 | u8 command = tf->command; | 986 | u8 command = tf->command; |
972 | struct ata_queued_cmd *qc; | 987 | struct ata_queued_cmd *qc; |
@@ -980,6 +995,8 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev, | |||
980 | BUG_ON(qc == NULL); | 995 | BUG_ON(qc == NULL); |
981 | 996 | ||
982 | qc->tf = *tf; | 997 | qc->tf = *tf; |
998 | if (cdb) | ||
999 | memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); | ||
983 | qc->dma_dir = dma_dir; | 1000 | qc->dma_dir = dma_dir; |
984 | if (dma_dir != DMA_NONE) { | 1001 | if (dma_dir != DMA_NONE) { |
985 | ata_sg_init_one(qc, buf, buflen); | 1002 | ata_sg_init_one(qc, buf, buflen); |
@@ -989,9 +1006,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev, | |||
989 | qc->private_data = &wait; | 1006 | qc->private_data = &wait; |
990 | qc->complete_fn = ata_qc_complete_internal; | 1007 | qc->complete_fn = ata_qc_complete_internal; |
991 | 1008 | ||
992 | qc->err_mask = ata_qc_issue(qc); | 1009 | ata_qc_issue(qc); |
993 | if (qc->err_mask) | ||
994 | ata_qc_complete(qc); | ||
995 | 1010 | ||
996 | spin_unlock_irqrestore(&ap->host_set->lock, flags); | 1011 | spin_unlock_irqrestore(&ap->host_set->lock, flags); |
997 | 1012 | ||
@@ -1032,7 +1047,7 @@ ata_exec_internal(struct ata_port *ap, struct ata_device *dev, | |||
1032 | * | 1047 | * |
1033 | * Kill the following code as soon as those drivers are fixed. | 1048 | * Kill the following code as soon as those drivers are fixed. |
1034 | */ | 1049 | */ |
1035 | if (ap->flags & ATA_FLAG_PORT_DISABLED) { | 1050 | if (ap->flags & ATA_FLAG_DISABLED) { |
1036 | err_mask |= AC_ERR_SYSTEM; | 1051 | err_mask |= AC_ERR_SYSTEM; |
1037 | ata_port_probe(ap); | 1052 | ata_port_probe(ap); |
1038 | } | 1053 | } |
@@ -1131,7 +1146,7 @@ static int ata_dev_read_id(struct ata_port *ap, struct ata_device *dev, | |||
1131 | 1146 | ||
1132 | tf.protocol = ATA_PROT_PIO; | 1147 | tf.protocol = ATA_PROT_PIO; |
1133 | 1148 | ||
1134 | err_mask = ata_exec_internal(ap, dev, &tf, DMA_FROM_DEVICE, | 1149 | err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_FROM_DEVICE, |
1135 | id, sizeof(id[0]) * ATA_ID_WORDS); | 1150 | id, sizeof(id[0]) * ATA_ID_WORDS); |
1136 | if (err_mask) { | 1151 | if (err_mask) { |
1137 | rc = -EIO; | 1152 | rc = -EIO; |
@@ -1212,7 +1227,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, | |||
1212 | unsigned int xfer_mask; | 1227 | unsigned int xfer_mask; |
1213 | int i, rc; | 1228 | int i, rc; |
1214 | 1229 | ||
1215 | if (!ata_dev_present(dev)) { | 1230 | if (!ata_dev_enabled(dev)) { |
1216 | DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", | 1231 | DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n", |
1217 | ap->id, dev->devno); | 1232 | ap->id, dev->devno); |
1218 | return 0; | 1233 | return 0; |
@@ -1228,7 +1243,7 @@ static int ata_dev_configure(struct ata_port *ap, struct ata_device *dev, | |||
1228 | id[84], id[85], id[86], id[87], id[88]); | 1243 | id[84], id[85], id[86], id[87], id[88]); |
1229 | 1244 | ||
1230 | /* initialize to-be-configured parameters */ | 1245 | /* initialize to-be-configured parameters */ |
1231 | dev->flags = 0; | 1246 | dev->flags &= ~ATA_DFLAG_CFG_MASK; |
1232 | dev->max_sectors = 0; | 1247 | dev->max_sectors = 0; |
1233 | dev->cdb_len = 0; | 1248 | dev->cdb_len = 0; |
1234 | dev->n_sectors = 0; | 1249 | dev->n_sectors = 0; |
@@ -1351,16 +1366,24 @@ err_out_nosup: | |||
1351 | * PCI/etc. bus probe sem. | 1366 | * PCI/etc. bus probe sem. |
1352 | * | 1367 | * |
1353 | * RETURNS: | 1368 | * RETURNS: |
1354 | * Zero on success, non-zero on error. | 1369 | * Zero on success, negative errno otherwise. |
1355 | */ | 1370 | */ |
1356 | 1371 | ||
1357 | static int ata_bus_probe(struct ata_port *ap) | 1372 | static int ata_bus_probe(struct ata_port *ap) |
1358 | { | 1373 | { |
1359 | unsigned int classes[ATA_MAX_DEVICES]; | 1374 | unsigned int classes[ATA_MAX_DEVICES]; |
1360 | unsigned int i, rc, found = 0; | 1375 | int tries[ATA_MAX_DEVICES]; |
1376 | int i, rc, down_xfermask; | ||
1377 | struct ata_device *dev; | ||
1361 | 1378 | ||
1362 | ata_port_probe(ap); | 1379 | ata_port_probe(ap); |
1363 | 1380 | ||
1381 | for (i = 0; i < ATA_MAX_DEVICES; i++) | ||
1382 | tries[i] = ATA_PROBE_MAX_TRIES; | ||
1383 | |||
1384 | retry: | ||
1385 | down_xfermask = 0; | ||
1386 | |||
1364 | /* reset and determine device classes */ | 1387 | /* reset and determine device classes */ |
1365 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 1388 | for (i = 0; i < ATA_MAX_DEVICES; i++) |
1366 | classes[i] = ATA_DEV_UNKNOWN; | 1389 | classes[i] = ATA_DEV_UNKNOWN; |
@@ -1374,7 +1397,7 @@ static int ata_bus_probe(struct ata_port *ap) | |||
1374 | } else { | 1397 | } else { |
1375 | ap->ops->phy_reset(ap); | 1398 | ap->ops->phy_reset(ap); |
1376 | 1399 | ||
1377 | if (!(ap->flags & ATA_FLAG_PORT_DISABLED)) | 1400 | if (!(ap->flags & ATA_FLAG_DISABLED)) |
1378 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 1401 | for (i = 0; i < ATA_MAX_DEVICES; i++) |
1379 | classes[i] = ap->device[i].class; | 1402 | classes[i] = ap->device[i].class; |
1380 | 1403 | ||
@@ -1387,43 +1410,76 @@ static int ata_bus_probe(struct ata_port *ap) | |||
1387 | 1410 | ||
1388 | /* read IDENTIFY page and configure devices */ | 1411 | /* read IDENTIFY page and configure devices */ |
1389 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 1412 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
1390 | struct ata_device *dev = &ap->device[i]; | 1413 | dev = &ap->device[i]; |
1391 | 1414 | ||
1392 | dev->class = classes[i]; | 1415 | if (tries[i]) |
1416 | dev->class = classes[i]; | ||
1393 | 1417 | ||
1394 | if (!ata_dev_present(dev)) | 1418 | if (!ata_dev_enabled(dev)) |
1395 | continue; | 1419 | continue; |
1396 | 1420 | ||
1397 | WARN_ON(dev->id != NULL); | 1421 | kfree(dev->id); |
1398 | if (ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id)) { | 1422 | dev->id = NULL; |
1399 | dev->class = ATA_DEV_NONE; | 1423 | rc = ata_dev_read_id(ap, dev, &dev->class, 1, &dev->id); |
1400 | continue; | 1424 | if (rc) |
1401 | } | 1425 | goto fail; |
1402 | 1426 | ||
1403 | if (ata_dev_configure(ap, dev, 1)) { | 1427 | rc = ata_dev_configure(ap, dev, 1); |
1404 | ata_dev_disable(ap, dev); | 1428 | if (rc) |
1405 | continue; | 1429 | goto fail; |
1406 | } | 1430 | } |
1407 | 1431 | ||
1408 | found = 1; | 1432 | /* configure transfer mode */ |
1433 | if (ap->ops->set_mode) { | ||
1434 | /* FIXME: make ->set_mode handle no device case and | ||
1435 | * return error code and failing device on failure as | ||
1436 | * ata_set_mode() does. | ||
1437 | */ | ||
1438 | for (i = 0; i < ATA_MAX_DEVICES; i++) | ||
1439 | if (ata_dev_enabled(&ap->device[i])) { | ||
1440 | ap->ops->set_mode(ap); | ||
1441 | break; | ||
1442 | } | ||
1443 | rc = 0; | ||
1444 | } else | ||
1445 | rc = ata_set_mode(ap, &dev); | ||
1446 | |||
1447 | if (rc) { | ||
1448 | down_xfermask = 1; | ||
1449 | goto fail; | ||
1409 | } | 1450 | } |
1410 | 1451 | ||
1411 | if (!found) | 1452 | for (i = 0; i < ATA_MAX_DEVICES; i++) |
1412 | goto err_out_disable; | 1453 | if (ata_dev_enabled(&ap->device[i])) |
1454 | return 0; | ||
1413 | 1455 | ||
1414 | if (ap->ops->set_mode) | 1456 | /* no device present, disable port */ |
1415 | ap->ops->set_mode(ap); | 1457 | ata_port_disable(ap); |
1416 | else | 1458 | ap->ops->port_disable(ap); |
1417 | ata_set_mode(ap); | 1459 | return -ENODEV; |
1418 | 1460 | ||
1419 | if (ap->flags & ATA_FLAG_PORT_DISABLED) | 1461 | fail: |
1420 | goto err_out_disable; | 1462 | switch (rc) { |
1463 | case -EINVAL: | ||
1464 | case -ENODEV: | ||
1465 | tries[dev->devno] = 0; | ||
1466 | break; | ||
1467 | case -EIO: | ||
1468 | ata_down_sata_spd_limit(ap); | ||
1469 | /* fall through */ | ||
1470 | default: | ||
1471 | tries[dev->devno]--; | ||
1472 | if (down_xfermask && | ||
1473 | ata_down_xfermask_limit(ap, dev, tries[dev->devno] == 1)) | ||
1474 | tries[dev->devno] = 0; | ||
1475 | } | ||
1421 | 1476 | ||
1422 | return 0; | 1477 | if (!tries[dev->devno]) { |
1478 | ata_down_xfermask_limit(ap, dev, 1); | ||
1479 | ata_dev_disable(ap, dev); | ||
1480 | } | ||
1423 | 1481 | ||
1424 | err_out_disable: | 1482 | goto retry; |
1425 | ap->ops->port_disable(ap); | ||
1426 | return -1; | ||
1427 | } | 1483 | } |
1428 | 1484 | ||
1429 | /** | 1485 | /** |
@@ -1439,7 +1495,7 @@ err_out_disable: | |||
1439 | 1495 | ||
1440 | void ata_port_probe(struct ata_port *ap) | 1496 | void ata_port_probe(struct ata_port *ap) |
1441 | { | 1497 | { |
1442 | ap->flags &= ~ATA_FLAG_PORT_DISABLED; | 1498 | ap->flags &= ~ATA_FLAG_DISABLED; |
1443 | } | 1499 | } |
1444 | 1500 | ||
1445 | /** | 1501 | /** |
@@ -1453,27 +1509,23 @@ void ata_port_probe(struct ata_port *ap) | |||
1453 | */ | 1509 | */ |
1454 | static void sata_print_link_status(struct ata_port *ap) | 1510 | static void sata_print_link_status(struct ata_port *ap) |
1455 | { | 1511 | { |
1456 | u32 sstatus, tmp; | 1512 | u32 sstatus, scontrol, tmp; |
1457 | const char *speed; | ||
1458 | 1513 | ||
1459 | if (!ap->ops->scr_read) | 1514 | if (!ap->ops->scr_read) |
1460 | return; | 1515 | return; |
1461 | 1516 | ||
1462 | sstatus = scr_read(ap, SCR_STATUS); | 1517 | sstatus = scr_read(ap, SCR_STATUS); |
1518 | scontrol = scr_read(ap, SCR_CONTROL); | ||
1463 | 1519 | ||
1464 | if (sata_dev_present(ap)) { | 1520 | if (sata_dev_present(ap)) { |
1465 | tmp = (sstatus >> 4) & 0xf; | 1521 | tmp = (sstatus >> 4) & 0xf; |
1466 | if (tmp & (1 << 0)) | 1522 | printk(KERN_INFO |
1467 | speed = "1.5"; | 1523 | "ata%u: SATA link up %s (SStatus %X SControl %X)\n", |
1468 | else if (tmp & (1 << 1)) | 1524 | ap->id, sata_spd_string(tmp), sstatus, scontrol); |
1469 | speed = "3.0"; | ||
1470 | else | ||
1471 | speed = "<unknown>"; | ||
1472 | printk(KERN_INFO "ata%u: SATA link up %s Gbps (SStatus %X)\n", | ||
1473 | ap->id, speed, sstatus); | ||
1474 | } else { | 1525 | } else { |
1475 | printk(KERN_INFO "ata%u: SATA link down (SStatus %X)\n", | 1526 | printk(KERN_INFO |
1476 | ap->id, sstatus); | 1527 | "ata%u: SATA link down (SStatus %X SControl %X)\n", |
1528 | ap->id, sstatus, scontrol); | ||
1477 | } | 1529 | } |
1478 | } | 1530 | } |
1479 | 1531 | ||
@@ -1520,7 +1572,7 @@ void __sata_phy_reset(struct ata_port *ap) | |||
1520 | else | 1572 | else |
1521 | ata_port_disable(ap); | 1573 | ata_port_disable(ap); |
1522 | 1574 | ||
1523 | if (ap->flags & ATA_FLAG_PORT_DISABLED) | 1575 | if (ap->flags & ATA_FLAG_DISABLED) |
1524 | return; | 1576 | return; |
1525 | 1577 | ||
1526 | if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { | 1578 | if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { |
@@ -1545,7 +1597,7 @@ void __sata_phy_reset(struct ata_port *ap) | |||
1545 | void sata_phy_reset(struct ata_port *ap) | 1597 | void sata_phy_reset(struct ata_port *ap) |
1546 | { | 1598 | { |
1547 | __sata_phy_reset(ap); | 1599 | __sata_phy_reset(ap); |
1548 | if (ap->flags & ATA_FLAG_PORT_DISABLED) | 1600 | if (ap->flags & ATA_FLAG_DISABLED) |
1549 | return; | 1601 | return; |
1550 | ata_bus_reset(ap); | 1602 | ata_bus_reset(ap); |
1551 | } | 1603 | } |
@@ -1562,7 +1614,7 @@ void sata_phy_reset(struct ata_port *ap) | |||
1562 | struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev) | 1614 | struct ata_device *ata_dev_pair(struct ata_port *ap, struct ata_device *adev) |
1563 | { | 1615 | { |
1564 | struct ata_device *pair = &ap->device[1 - adev->devno]; | 1616 | struct ata_device *pair = &ap->device[1 - adev->devno]; |
1565 | if (!ata_dev_present(pair)) | 1617 | if (!ata_dev_enabled(pair)) |
1566 | return NULL; | 1618 | return NULL; |
1567 | return pair; | 1619 | return pair; |
1568 | } | 1620 | } |
@@ -1584,7 +1636,121 @@ void ata_port_disable(struct ata_port *ap) | |||
1584 | { | 1636 | { |
1585 | ap->device[0].class = ATA_DEV_NONE; | 1637 | ap->device[0].class = ATA_DEV_NONE; |
1586 | ap->device[1].class = ATA_DEV_NONE; | 1638 | ap->device[1].class = ATA_DEV_NONE; |
1587 | ap->flags |= ATA_FLAG_PORT_DISABLED; | 1639 | ap->flags |= ATA_FLAG_DISABLED; |
1640 | } | ||
1641 | |||
1642 | /** | ||
1643 | * ata_down_sata_spd_limit - adjust SATA spd limit downward | ||
1644 | * @ap: Port to adjust SATA spd limit for | ||
1645 | * | ||
1646 | * Adjust SATA spd limit of @ap downward. Note that this | ||
1647 | * function only adjusts the limit. The change must be applied | ||
1648 | * using ata_set_sata_spd(). | ||
1649 | * | ||
1650 | * LOCKING: | ||
1651 | * Inherited from caller. | ||
1652 | * | ||
1653 | * RETURNS: | ||
1654 | * 0 on success, negative errno on failure | ||
1655 | */ | ||
1656 | int ata_down_sata_spd_limit(struct ata_port *ap) | ||
1657 | { | ||
1658 | u32 spd, mask; | ||
1659 | int highbit; | ||
1660 | |||
1661 | if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read) | ||
1662 | return -EOPNOTSUPP; | ||
1663 | |||
1664 | mask = ap->sata_spd_limit; | ||
1665 | if (mask <= 1) | ||
1666 | return -EINVAL; | ||
1667 | highbit = fls(mask) - 1; | ||
1668 | mask &= ~(1 << highbit); | ||
1669 | |||
1670 | spd = (scr_read(ap, SCR_STATUS) >> 4) & 0xf; | ||
1671 | if (spd <= 1) | ||
1672 | return -EINVAL; | ||
1673 | spd--; | ||
1674 | mask &= (1 << spd) - 1; | ||
1675 | if (!mask) | ||
1676 | return -EINVAL; | ||
1677 | |||
1678 | ap->sata_spd_limit = mask; | ||
1679 | |||
1680 | printk(KERN_WARNING "ata%u: limiting SATA link speed to %s\n", | ||
1681 | ap->id, sata_spd_string(fls(mask))); | ||
1682 | |||
1683 | return 0; | ||
1684 | } | ||
1685 | |||
1686 | static int __ata_set_sata_spd_needed(struct ata_port *ap, u32 *scontrol) | ||
1687 | { | ||
1688 | u32 spd, limit; | ||
1689 | |||
1690 | if (ap->sata_spd_limit == UINT_MAX) | ||
1691 | limit = 0; | ||
1692 | else | ||
1693 | limit = fls(ap->sata_spd_limit); | ||
1694 | |||
1695 | spd = (*scontrol >> 4) & 0xf; | ||
1696 | *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4); | ||
1697 | |||
1698 | return spd != limit; | ||
1699 | } | ||
1700 | |||
1701 | /** | ||
1702 | * ata_set_sata_spd_needed - is SATA spd configuration needed | ||
1703 | * @ap: Port in question | ||
1704 | * | ||
1705 | * Test whether the spd limit in SControl matches | ||
1706 | * @ap->sata_spd_limit. This function is used to determine | ||
1707 | * whether hardreset is necessary to apply SATA spd | ||
1708 | * configuration. | ||
1709 | * | ||
1710 | * LOCKING: | ||
1711 | * Inherited from caller. | ||
1712 | * | ||
1713 | * RETURNS: | ||
1714 | * 1 if SATA spd configuration is needed, 0 otherwise. | ||
1715 | */ | ||
1716 | int ata_set_sata_spd_needed(struct ata_port *ap) | ||
1717 | { | ||
1718 | u32 scontrol; | ||
1719 | |||
1720 | if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read) | ||
1721 | return 0; | ||
1722 | |||
1723 | scontrol = scr_read(ap, SCR_CONTROL); | ||
1724 | |||
1725 | return __ata_set_sata_spd_needed(ap, &scontrol); | ||
1726 | } | ||
1727 | |||
1728 | /** | ||
1729 | * ata_set_sata_spd - set SATA spd according to spd limit | ||
1730 | * @ap: Port to set SATA spd for | ||
1731 | * | ||
1732 | * Set SATA spd of @ap according to sata_spd_limit. | ||
1733 | * | ||
1734 | * LOCKING: | ||
1735 | * Inherited from caller. | ||
1736 | * | ||
1737 | * RETURNS: | ||
1738 | * 0 if spd doesn't need to be changed, 1 if spd has been | ||
1739 | * changed. -EOPNOTSUPP if SCR registers are inaccessible. | ||
1740 | */ | ||
1741 | int ata_set_sata_spd(struct ata_port *ap) | ||
1742 | { | ||
1743 | u32 scontrol; | ||
1744 | |||
1745 | if (ap->cbl != ATA_CBL_SATA || !ap->ops->scr_read) | ||
1746 | return -EOPNOTSUPP; | ||
1747 | |||
1748 | scontrol = scr_read(ap, SCR_CONTROL); | ||
1749 | if (!__ata_set_sata_spd_needed(ap, &scontrol)) | ||
1750 | return 0; | ||
1751 | |||
1752 | scr_write(ap, SCR_CONTROL, scontrol); | ||
1753 | return 1; | ||
1588 | } | 1754 | } |
1589 | 1755 | ||
1590 | /* | 1756 | /* |
@@ -1735,11 +1901,62 @@ int ata_timing_compute(struct ata_device *adev, unsigned short speed, | |||
1735 | return 0; | 1901 | return 0; |
1736 | } | 1902 | } |
1737 | 1903 | ||
1904 | /** | ||
1905 | * ata_down_xfermask_limit - adjust dev xfer masks downward | ||
1906 | * @ap: Port associated with device @dev | ||
1907 | * @dev: Device to adjust xfer masks | ||
1908 | * @force_pio0: Force PIO0 | ||
1909 | * | ||
1910 | * Adjust xfer masks of @dev downward. Note that this function | ||
1911 | * does not apply the change. Invoking ata_set_mode() afterwards | ||
1912 | * will apply the limit. | ||
1913 | * | ||
1914 | * LOCKING: | ||
1915 | * Inherited from caller. | ||
1916 | * | ||
1917 | * RETURNS: | ||
1918 | * 0 on success, negative errno on failure | ||
1919 | */ | ||
1920 | int ata_down_xfermask_limit(struct ata_port *ap, struct ata_device *dev, | ||
1921 | int force_pio0) | ||
1922 | { | ||
1923 | unsigned long xfer_mask; | ||
1924 | int highbit; | ||
1925 | |||
1926 | xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask, | ||
1927 | dev->udma_mask); | ||
1928 | |||
1929 | if (!xfer_mask) | ||
1930 | goto fail; | ||
1931 | /* don't gear down to MWDMA from UDMA, go directly to PIO */ | ||
1932 | if (xfer_mask & ATA_MASK_UDMA) | ||
1933 | xfer_mask &= ~ATA_MASK_MWDMA; | ||
1934 | |||
1935 | highbit = fls(xfer_mask) - 1; | ||
1936 | xfer_mask &= ~(1 << highbit); | ||
1937 | if (force_pio0) | ||
1938 | xfer_mask &= 1 << ATA_SHIFT_PIO; | ||
1939 | if (!xfer_mask) | ||
1940 | goto fail; | ||
1941 | |||
1942 | ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, | ||
1943 | &dev->udma_mask); | ||
1944 | |||
1945 | printk(KERN_WARNING "ata%u: dev %u limiting speed to %s\n", | ||
1946 | ap->id, dev->devno, ata_mode_string(xfer_mask)); | ||
1947 | |||
1948 | return 0; | ||
1949 | |||
1950 | fail: | ||
1951 | return -EINVAL; | ||
1952 | } | ||
1953 | |||
1738 | static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) | 1954 | static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) |
1739 | { | 1955 | { |
1740 | unsigned int err_mask; | 1956 | unsigned int err_mask; |
1741 | int rc; | 1957 | int rc; |
1742 | 1958 | ||
1959 | dev->flags &= ~ATA_DFLAG_PIO; | ||
1743 | if (dev->xfer_shift == ATA_SHIFT_PIO) | 1960 | if (dev->xfer_shift == ATA_SHIFT_PIO) |
1744 | dev->flags |= ATA_DFLAG_PIO; | 1961 | dev->flags |= ATA_DFLAG_PIO; |
1745 | 1962 | ||
@@ -1752,12 +1969,8 @@ static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) | |||
1752 | } | 1969 | } |
1753 | 1970 | ||
1754 | rc = ata_dev_revalidate(ap, dev, 0); | 1971 | rc = ata_dev_revalidate(ap, dev, 0); |
1755 | if (rc) { | 1972 | if (rc) |
1756 | printk(KERN_ERR | ||
1757 | "ata%u: failed to revalidate after set xfermode\n", | ||
1758 | ap->id); | ||
1759 | return rc; | 1973 | return rc; |
1760 | } | ||
1761 | 1974 | ||
1762 | DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", | 1975 | DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n", |
1763 | dev->xfer_shift, (int)dev->xfer_mode); | 1976 | dev->xfer_shift, (int)dev->xfer_mode); |
@@ -1768,118 +1981,107 @@ static int ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev) | |||
1768 | return 0; | 1981 | return 0; |
1769 | } | 1982 | } |
1770 | 1983 | ||
1771 | static int ata_host_set_pio(struct ata_port *ap) | ||
1772 | { | ||
1773 | int i; | ||
1774 | |||
1775 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | ||
1776 | struct ata_device *dev = &ap->device[i]; | ||
1777 | |||
1778 | if (!ata_dev_present(dev)) | ||
1779 | continue; | ||
1780 | |||
1781 | if (!dev->pio_mode) { | ||
1782 | printk(KERN_WARNING "ata%u: no PIO support for device %d.\n", ap->id, i); | ||
1783 | return -1; | ||
1784 | } | ||
1785 | |||
1786 | dev->xfer_mode = dev->pio_mode; | ||
1787 | dev->xfer_shift = ATA_SHIFT_PIO; | ||
1788 | if (ap->ops->set_piomode) | ||
1789 | ap->ops->set_piomode(ap, dev); | ||
1790 | } | ||
1791 | |||
1792 | return 0; | ||
1793 | } | ||
1794 | |||
1795 | static void ata_host_set_dma(struct ata_port *ap) | ||
1796 | { | ||
1797 | int i; | ||
1798 | |||
1799 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | ||
1800 | struct ata_device *dev = &ap->device[i]; | ||
1801 | |||
1802 | if (!ata_dev_present(dev) || !dev->dma_mode) | ||
1803 | continue; | ||
1804 | |||
1805 | dev->xfer_mode = dev->dma_mode; | ||
1806 | dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); | ||
1807 | if (ap->ops->set_dmamode) | ||
1808 | ap->ops->set_dmamode(ap, dev); | ||
1809 | } | ||
1810 | } | ||
1811 | |||
1812 | /** | 1984 | /** |
1813 | * ata_set_mode - Program timings and issue SET FEATURES - XFER | 1985 | * ata_set_mode - Program timings and issue SET FEATURES - XFER |
1814 | * @ap: port on which timings will be programmed | 1986 | * @ap: port on which timings will be programmed |
1987 | * @r_failed_dev: out parameter for failed device | ||
1815 | * | 1988 | * |
1816 | * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). | 1989 | * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If |
1990 | * ata_set_mode() fails, pointer to the failing device is | ||
1991 | * returned in @r_failed_dev. | ||
1817 | * | 1992 | * |
1818 | * LOCKING: | 1993 | * LOCKING: |
1819 | * PCI/etc. bus probe sem. | 1994 | * PCI/etc. bus probe sem. |
1995 | * | ||
1996 | * RETURNS: | ||
1997 | * 0 on success, negative errno otherwise | ||
1820 | */ | 1998 | */ |
1821 | static void ata_set_mode(struct ata_port *ap) | 1999 | int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev) |
1822 | { | 2000 | { |
1823 | int i, rc, used_dma = 0; | 2001 | struct ata_device *dev; |
2002 | int i, rc = 0, used_dma = 0, found = 0; | ||
1824 | 2003 | ||
1825 | /* step 1: calculate xfer_mask */ | 2004 | /* step 1: calculate xfer_mask */ |
1826 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2005 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
1827 | struct ata_device *dev = &ap->device[i]; | ||
1828 | unsigned int pio_mask, dma_mask; | 2006 | unsigned int pio_mask, dma_mask; |
1829 | 2007 | ||
1830 | if (!ata_dev_present(dev)) | 2008 | dev = &ap->device[i]; |
2009 | |||
2010 | if (!ata_dev_enabled(dev)) | ||
1831 | continue; | 2011 | continue; |
1832 | 2012 | ||
1833 | ata_dev_xfermask(ap, dev); | 2013 | ata_dev_xfermask(ap, dev); |
1834 | 2014 | ||
1835 | /* TODO: let LLDD filter dev->*_mask here */ | ||
1836 | |||
1837 | pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); | 2015 | pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0); |
1838 | dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); | 2016 | dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask); |
1839 | dev->pio_mode = ata_xfer_mask2mode(pio_mask); | 2017 | dev->pio_mode = ata_xfer_mask2mode(pio_mask); |
1840 | dev->dma_mode = ata_xfer_mask2mode(dma_mask); | 2018 | dev->dma_mode = ata_xfer_mask2mode(dma_mask); |
1841 | 2019 | ||
2020 | found = 1; | ||
1842 | if (dev->dma_mode) | 2021 | if (dev->dma_mode) |
1843 | used_dma = 1; | 2022 | used_dma = 1; |
1844 | } | 2023 | } |
2024 | if (!found) | ||
2025 | goto out; | ||
1845 | 2026 | ||
1846 | /* step 2: always set host PIO timings */ | 2027 | /* step 2: always set host PIO timings */ |
1847 | rc = ata_host_set_pio(ap); | 2028 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
1848 | if (rc) | 2029 | dev = &ap->device[i]; |
1849 | goto err_out; | 2030 | if (!ata_dev_enabled(dev)) |
2031 | continue; | ||
2032 | |||
2033 | if (!dev->pio_mode) { | ||
2034 | printk(KERN_WARNING "ata%u: dev %u no PIO support\n", | ||
2035 | ap->id, dev->devno); | ||
2036 | rc = -EINVAL; | ||
2037 | goto out; | ||
2038 | } | ||
2039 | |||
2040 | dev->xfer_mode = dev->pio_mode; | ||
2041 | dev->xfer_shift = ATA_SHIFT_PIO; | ||
2042 | if (ap->ops->set_piomode) | ||
2043 | ap->ops->set_piomode(ap, dev); | ||
2044 | } | ||
1850 | 2045 | ||
1851 | /* step 3: set host DMA timings */ | 2046 | /* step 3: set host DMA timings */ |
1852 | ata_host_set_dma(ap); | 2047 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
2048 | dev = &ap->device[i]; | ||
2049 | |||
2050 | if (!ata_dev_enabled(dev) || !dev->dma_mode) | ||
2051 | continue; | ||
2052 | |||
2053 | dev->xfer_mode = dev->dma_mode; | ||
2054 | dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode); | ||
2055 | if (ap->ops->set_dmamode) | ||
2056 | ap->ops->set_dmamode(ap, dev); | ||
2057 | } | ||
1853 | 2058 | ||
1854 | /* step 4: update devices' xfer mode */ | 2059 | /* step 4: update devices' xfer mode */ |
1855 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2060 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
1856 | struct ata_device *dev = &ap->device[i]; | 2061 | dev = &ap->device[i]; |
1857 | 2062 | ||
1858 | if (!ata_dev_present(dev)) | 2063 | if (!ata_dev_enabled(dev)) |
1859 | continue; | 2064 | continue; |
1860 | 2065 | ||
1861 | if (ata_dev_set_mode(ap, dev)) | 2066 | rc = ata_dev_set_mode(ap, dev); |
1862 | goto err_out; | 2067 | if (rc) |
2068 | goto out; | ||
1863 | } | 2069 | } |
1864 | 2070 | ||
1865 | /* | 2071 | /* Record simplex status. If we selected DMA then the other |
1866 | * Record simplex status. If we selected DMA then the other | 2072 | * host channels are not permitted to do so. |
1867 | * host channels are not permitted to do so. | ||
1868 | */ | 2073 | */ |
1869 | |||
1870 | if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX)) | 2074 | if (used_dma && (ap->host_set->flags & ATA_HOST_SIMPLEX)) |
1871 | ap->host_set->simplex_claimed = 1; | 2075 | ap->host_set->simplex_claimed = 1; |
1872 | 2076 | ||
1873 | /* | 2077 | /* step 5: chip-specific finalisation */ |
1874 | * Chip specific finalisation | ||
1875 | */ | ||
1876 | if (ap->ops->post_set_mode) | 2078 | if (ap->ops->post_set_mode) |
1877 | ap->ops->post_set_mode(ap); | 2079 | ap->ops->post_set_mode(ap); |
1878 | 2080 | ||
1879 | return; | 2081 | out: |
1880 | 2082 | if (rc) | |
1881 | err_out: | 2083 | *r_failed_dev = dev; |
1882 | ata_port_disable(ap); | 2084 | return rc; |
1883 | } | 2085 | } |
1884 | 2086 | ||
1885 | /** | 2087 | /** |
@@ -2032,8 +2234,10 @@ static unsigned int ata_bus_softreset(struct ata_port *ap, | |||
2032 | * the bus shows 0xFF because the odd clown forgets the D7 | 2234 | * the bus shows 0xFF because the odd clown forgets the D7 |
2033 | * pulldown resistor. | 2235 | * pulldown resistor. |
2034 | */ | 2236 | */ |
2035 | if (ata_check_status(ap) == 0xFF) | 2237 | if (ata_check_status(ap) == 0xFF) { |
2238 | printk(KERN_ERR "ata%u: SRST failed (status 0xFF)\n", ap->id); | ||
2036 | return AC_ERR_OTHER; | 2239 | return AC_ERR_OTHER; |
2240 | } | ||
2037 | 2241 | ||
2038 | ata_bus_post_reset(ap, devmask); | 2242 | ata_bus_post_reset(ap, devmask); |
2039 | 2243 | ||
@@ -2057,7 +2261,7 @@ static unsigned int ata_bus_softreset(struct ata_port *ap, | |||
2057 | * Obtains host_set lock. | 2261 | * Obtains host_set lock. |
2058 | * | 2262 | * |
2059 | * SIDE EFFECTS: | 2263 | * SIDE EFFECTS: |
2060 | * Sets ATA_FLAG_PORT_DISABLED if bus reset fails. | 2264 | * Sets ATA_FLAG_DISABLED if bus reset fails. |
2061 | */ | 2265 | */ |
2062 | 2266 | ||
2063 | void ata_bus_reset(struct ata_port *ap) | 2267 | void ata_bus_reset(struct ata_port *ap) |
@@ -2134,9 +2338,11 @@ err_out: | |||
2134 | static int sata_phy_resume(struct ata_port *ap) | 2338 | static int sata_phy_resume(struct ata_port *ap) |
2135 | { | 2339 | { |
2136 | unsigned long timeout = jiffies + (HZ * 5); | 2340 | unsigned long timeout = jiffies + (HZ * 5); |
2137 | u32 sstatus; | 2341 | u32 scontrol, sstatus; |
2138 | 2342 | ||
2139 | scr_write_flush(ap, SCR_CONTROL, 0x300); | 2343 | scontrol = scr_read(ap, SCR_CONTROL); |
2344 | scontrol = (scontrol & 0x0f0) | 0x300; | ||
2345 | scr_write_flush(ap, SCR_CONTROL, scontrol); | ||
2140 | 2346 | ||
2141 | /* Wait for phy to become ready, if necessary. */ | 2347 | /* Wait for phy to become ready, if necessary. */ |
2142 | do { | 2348 | do { |
@@ -2165,7 +2371,18 @@ static int sata_phy_resume(struct ata_port *ap) | |||
2165 | void ata_std_probeinit(struct ata_port *ap) | 2371 | void ata_std_probeinit(struct ata_port *ap) |
2166 | { | 2372 | { |
2167 | if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) { | 2373 | if ((ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read) { |
2374 | u32 spd; | ||
2375 | |||
2376 | /* set cable type and resume link */ | ||
2377 | ap->cbl = ATA_CBL_SATA; | ||
2168 | sata_phy_resume(ap); | 2378 | sata_phy_resume(ap); |
2379 | |||
2380 | /* init sata_spd_limit to the current value */ | ||
2381 | spd = (scr_read(ap, SCR_CONTROL) & 0xf0) >> 4; | ||
2382 | if (spd) | ||
2383 | ap->sata_spd_limit &= (1 << spd) - 1; | ||
2384 | |||
2385 | /* wait for device */ | ||
2169 | if (sata_dev_present(ap)) | 2386 | if (sata_dev_present(ap)) |
2170 | ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); | 2387 | ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT); |
2171 | } | 2388 | } |
@@ -2174,7 +2391,6 @@ void ata_std_probeinit(struct ata_port *ap) | |||
2174 | /** | 2391 | /** |
2175 | * ata_std_softreset - reset host port via ATA SRST | 2392 | * ata_std_softreset - reset host port via ATA SRST |
2176 | * @ap: port to reset | 2393 | * @ap: port to reset |
2177 | * @verbose: fail verbosely | ||
2178 | * @classes: resulting classes of attached devices | 2394 | * @classes: resulting classes of attached devices |
2179 | * | 2395 | * |
2180 | * Reset host port using ATA SRST. This function is to be used | 2396 | * Reset host port using ATA SRST. This function is to be used |
@@ -2186,7 +2402,7 @@ void ata_std_probeinit(struct ata_port *ap) | |||
2186 | * RETURNS: | 2402 | * RETURNS: |
2187 | * 0 on success, -errno otherwise. | 2403 | * 0 on success, -errno otherwise. |
2188 | */ | 2404 | */ |
2189 | int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes) | 2405 | int ata_std_softreset(struct ata_port *ap, unsigned int *classes) |
2190 | { | 2406 | { |
2191 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; | 2407 | unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS; |
2192 | unsigned int devmask = 0, err_mask; | 2408 | unsigned int devmask = 0, err_mask; |
@@ -2212,12 +2428,8 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes) | |||
2212 | DPRINTK("about to softreset, devmask=%x\n", devmask); | 2428 | DPRINTK("about to softreset, devmask=%x\n", devmask); |
2213 | err_mask = ata_bus_softreset(ap, devmask); | 2429 | err_mask = ata_bus_softreset(ap, devmask); |
2214 | if (err_mask) { | 2430 | if (err_mask) { |
2215 | if (verbose) | 2431 | printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n", |
2216 | printk(KERN_ERR "ata%u: SRST failed (err_mask=0x%x)\n", | 2432 | ap->id, err_mask); |
2217 | ap->id, err_mask); | ||
2218 | else | ||
2219 | DPRINTK("EXIT, softreset failed (err_mask=0x%x)\n", | ||
2220 | err_mask); | ||
2221 | return -EIO; | 2433 | return -EIO; |
2222 | } | 2434 | } |
2223 | 2435 | ||
@@ -2234,7 +2446,6 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes) | |||
2234 | /** | 2446 | /** |
2235 | * sata_std_hardreset - reset host port via SATA phy reset | 2447 | * sata_std_hardreset - reset host port via SATA phy reset |
2236 | * @ap: port to reset | 2448 | * @ap: port to reset |
2237 | * @verbose: fail verbosely | ||
2238 | * @class: resulting class of attached device | 2449 | * @class: resulting class of attached device |
2239 | * | 2450 | * |
2240 | * SATA phy-reset host port using DET bits of SControl register. | 2451 | * SATA phy-reset host port using DET bits of SControl register. |
@@ -2247,20 +2458,36 @@ int ata_std_softreset(struct ata_port *ap, int verbose, unsigned int *classes) | |||
2247 | * RETURNS: | 2458 | * RETURNS: |
2248 | * 0 on success, -errno otherwise. | 2459 | * 0 on success, -errno otherwise. |
2249 | */ | 2460 | */ |
2250 | int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class) | 2461 | int sata_std_hardreset(struct ata_port *ap, unsigned int *class) |
2251 | { | 2462 | { |
2463 | u32 scontrol; | ||
2464 | |||
2252 | DPRINTK("ENTER\n"); | 2465 | DPRINTK("ENTER\n"); |
2253 | 2466 | ||
2254 | /* Issue phy wake/reset */ | 2467 | if (ata_set_sata_spd_needed(ap)) { |
2255 | scr_write_flush(ap, SCR_CONTROL, 0x301); | 2468 | /* SATA spec says nothing about how to reconfigure |
2469 | * spd. To be on the safe side, turn off phy during | ||
2470 | * reconfiguration. This works for at least ICH7 AHCI | ||
2471 | * and Sil3124. | ||
2472 | */ | ||
2473 | scontrol = scr_read(ap, SCR_CONTROL); | ||
2474 | scontrol = (scontrol & 0x0f0) | 0x302; | ||
2475 | scr_write_flush(ap, SCR_CONTROL, scontrol); | ||
2256 | 2476 | ||
2257 | /* | 2477 | ata_set_sata_spd(ap); |
2258 | * Couldn't find anything in SATA I/II specs, but AHCI-1.1 | 2478 | } |
2479 | |||
2480 | /* issue phy wake/reset */ | ||
2481 | scontrol = scr_read(ap, SCR_CONTROL); | ||
2482 | scontrol = (scontrol & 0x0f0) | 0x301; | ||
2483 | scr_write_flush(ap, SCR_CONTROL, scontrol); | ||
2484 | |||
2485 | /* Couldn't find anything in SATA I/II specs, but AHCI-1.1 | ||
2259 | * 10.4.2 says at least 1 ms. | 2486 | * 10.4.2 says at least 1 ms. |
2260 | */ | 2487 | */ |
2261 | msleep(1); | 2488 | msleep(1); |
2262 | 2489 | ||
2263 | /* Bring phy back */ | 2490 | /* bring phy back */ |
2264 | sata_phy_resume(ap); | 2491 | sata_phy_resume(ap); |
2265 | 2492 | ||
2266 | /* TODO: phy layer with polling, timeouts, etc. */ | 2493 | /* TODO: phy layer with polling, timeouts, etc. */ |
@@ -2271,11 +2498,8 @@ int sata_std_hardreset(struct ata_port *ap, int verbose, unsigned int *class) | |||
2271 | } | 2498 | } |
2272 | 2499 | ||
2273 | if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { | 2500 | if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) { |
2274 | if (verbose) | 2501 | printk(KERN_ERR |
2275 | printk(KERN_ERR "ata%u: COMRESET failed " | 2502 | "ata%u: COMRESET failed (device not ready)\n", ap->id); |
2276 | "(device not ready)\n", ap->id); | ||
2277 | else | ||
2278 | DPRINTK("EXIT, device not ready\n"); | ||
2279 | return -EIO; | 2503 | return -EIO; |
2280 | } | 2504 | } |
2281 | 2505 | ||
@@ -2306,10 +2530,6 @@ void ata_std_postreset(struct ata_port *ap, unsigned int *classes) | |||
2306 | { | 2530 | { |
2307 | DPRINTK("ENTER\n"); | 2531 | DPRINTK("ENTER\n"); |
2308 | 2532 | ||
2309 | /* set cable type if it isn't already set */ | ||
2310 | if (ap->cbl == ATA_CBL_NONE && ap->flags & ATA_FLAG_SATA) | ||
2311 | ap->cbl = ATA_CBL_SATA; | ||
2312 | |||
2313 | /* print link status */ | 2533 | /* print link status */ |
2314 | if (ap->cbl == ATA_CBL_SATA) | 2534 | if (ap->cbl == ATA_CBL_SATA) |
2315 | sata_print_link_status(ap); | 2535 | sata_print_link_status(ap); |
@@ -2359,7 +2579,7 @@ int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes) | |||
2359 | ata_reset_fn_t hardreset; | 2579 | ata_reset_fn_t hardreset; |
2360 | 2580 | ||
2361 | hardreset = NULL; | 2581 | hardreset = NULL; |
2362 | if (ap->flags & ATA_FLAG_SATA && ap->ops->scr_read) | 2582 | if (ap->cbl == ATA_CBL_SATA && ap->ops->scr_read) |
2363 | hardreset = sata_std_hardreset; | 2583 | hardreset = sata_std_hardreset; |
2364 | 2584 | ||
2365 | return ata_drive_probe_reset(ap, ata_std_probeinit, | 2585 | return ata_drive_probe_reset(ap, ata_std_probeinit, |
@@ -2367,16 +2587,15 @@ int ata_std_probe_reset(struct ata_port *ap, unsigned int *classes) | |||
2367 | ata_std_postreset, classes); | 2587 | ata_std_postreset, classes); |
2368 | } | 2588 | } |
2369 | 2589 | ||
2370 | static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset, | 2590 | int ata_do_reset(struct ata_port *ap, ata_reset_fn_t reset, |
2371 | ata_postreset_fn_t postreset, | 2591 | ata_postreset_fn_t postreset, unsigned int *classes) |
2372 | unsigned int *classes) | ||
2373 | { | 2592 | { |
2374 | int i, rc; | 2593 | int i, rc; |
2375 | 2594 | ||
2376 | for (i = 0; i < ATA_MAX_DEVICES; i++) | 2595 | for (i = 0; i < ATA_MAX_DEVICES; i++) |
2377 | classes[i] = ATA_DEV_UNKNOWN; | 2596 | classes[i] = ATA_DEV_UNKNOWN; |
2378 | 2597 | ||
2379 | rc = reset(ap, 0, classes); | 2598 | rc = reset(ap, classes); |
2380 | if (rc) | 2599 | if (rc) |
2381 | return rc; | 2600 | return rc; |
2382 | 2601 | ||
@@ -2396,7 +2615,7 @@ static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset, | |||
2396 | if (postreset) | 2615 | if (postreset) |
2397 | postreset(ap, classes); | 2616 | postreset(ap, classes); |
2398 | 2617 | ||
2399 | return classes[0] != ATA_DEV_UNKNOWN ? 0 : -ENODEV; | 2618 | return 0; |
2400 | } | 2619 | } |
2401 | 2620 | ||
2402 | /** | 2621 | /** |
@@ -2420,8 +2639,6 @@ static int do_probe_reset(struct ata_port *ap, ata_reset_fn_t reset, | |||
2420 | * - If classification is supported, fill classes[] with | 2639 | * - If classification is supported, fill classes[] with |
2421 | * recognized class codes. | 2640 | * recognized class codes. |
2422 | * - If classification is not supported, leave classes[] alone. | 2641 | * - If classification is not supported, leave classes[] alone. |
2423 | * - If verbose is non-zero, print error message on failure; | ||
2424 | * otherwise, shut up. | ||
2425 | * | 2642 | * |
2426 | * LOCKING: | 2643 | * LOCKING: |
2427 | * Kernel thread context (may sleep) | 2644 | * Kernel thread context (may sleep) |
@@ -2440,22 +2657,46 @@ int ata_drive_probe_reset(struct ata_port *ap, ata_probeinit_fn_t probeinit, | |||
2440 | if (probeinit) | 2657 | if (probeinit) |
2441 | probeinit(ap); | 2658 | probeinit(ap); |
2442 | 2659 | ||
2443 | if (softreset) { | 2660 | if (softreset && !ata_set_sata_spd_needed(ap)) { |
2444 | rc = do_probe_reset(ap, softreset, postreset, classes); | 2661 | rc = ata_do_reset(ap, softreset, postreset, classes); |
2445 | if (rc == 0) | 2662 | if (rc == 0 && classes[0] != ATA_DEV_UNKNOWN) |
2446 | return 0; | 2663 | goto done; |
2664 | printk(KERN_INFO "ata%u: softreset failed, will try " | ||
2665 | "hardreset in 5 secs\n", ap->id); | ||
2666 | ssleep(5); | ||
2447 | } | 2667 | } |
2448 | 2668 | ||
2449 | if (!hardreset) | 2669 | if (!hardreset) |
2450 | return rc; | 2670 | goto done; |
2451 | 2671 | ||
2452 | rc = do_probe_reset(ap, hardreset, postreset, classes); | 2672 | while (1) { |
2453 | if (rc == 0 || rc != -ENODEV) | 2673 | rc = ata_do_reset(ap, hardreset, postreset, classes); |
2454 | return rc; | 2674 | if (rc == 0) { |
2675 | if (classes[0] != ATA_DEV_UNKNOWN) | ||
2676 | goto done; | ||
2677 | break; | ||
2678 | } | ||
2679 | |||
2680 | if (ata_down_sata_spd_limit(ap)) | ||
2681 | goto done; | ||
2682 | |||
2683 | printk(KERN_INFO "ata%u: hardreset failed, will retry " | ||
2684 | "in 5 secs\n", ap->id); | ||
2685 | ssleep(5); | ||
2686 | } | ||
2455 | 2687 | ||
2456 | if (softreset) | 2688 | if (softreset) { |
2457 | rc = do_probe_reset(ap, softreset, postreset, classes); | 2689 | printk(KERN_INFO "ata%u: hardreset succeeded without " |
2690 | "classification, will retry softreset in 5 secs\n", | ||
2691 | ap->id); | ||
2692 | ssleep(5); | ||
2458 | 2693 | ||
2694 | rc = ata_do_reset(ap, softreset, postreset, classes); | ||
2695 | } | ||
2696 | |||
2697 | done: | ||
2698 | if (rc == 0 && classes[0] == ATA_DEV_UNKNOWN) | ||
2699 | rc = -ENODEV; | ||
2459 | return rc; | 2700 | return rc; |
2460 | } | 2701 | } |
2461 | 2702 | ||
@@ -2539,15 +2780,14 @@ static int ata_dev_same_device(struct ata_port *ap, struct ata_device *dev, | |||
2539 | int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, | 2780 | int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, |
2540 | int post_reset) | 2781 | int post_reset) |
2541 | { | 2782 | { |
2542 | unsigned int class; | 2783 | unsigned int class = dev->class; |
2543 | u16 *id; | 2784 | u16 *id = NULL; |
2544 | int rc; | 2785 | int rc; |
2545 | 2786 | ||
2546 | if (!ata_dev_present(dev)) | 2787 | if (!ata_dev_enabled(dev)) { |
2547 | return -ENODEV; | 2788 | rc = -ENODEV; |
2548 | 2789 | goto fail; | |
2549 | class = dev->class; | 2790 | } |
2550 | id = NULL; | ||
2551 | 2791 | ||
2552 | /* allocate & read ID data */ | 2792 | /* allocate & read ID data */ |
2553 | rc = ata_dev_read_id(ap, dev, &class, post_reset, &id); | 2793 | rc = ata_dev_read_id(ap, dev, &class, post_reset, &id); |
@@ -2564,7 +2804,9 @@ int ata_dev_revalidate(struct ata_port *ap, struct ata_device *dev, | |||
2564 | dev->id = id; | 2804 | dev->id = id; |
2565 | 2805 | ||
2566 | /* configure device according to the new ID */ | 2806 | /* configure device according to the new ID */ |
2567 | return ata_dev_configure(ap, dev, 0); | 2807 | rc = ata_dev_configure(ap, dev, 0); |
2808 | if (rc == 0) | ||
2809 | return 0; | ||
2568 | 2810 | ||
2569 | fail: | 2811 | fail: |
2570 | printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n", | 2812 | printk(KERN_ERR "ata%u: dev %u revalidation failed (errno=%d)\n", |
@@ -2666,23 +2908,34 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) | |||
2666 | unsigned long xfer_mask; | 2908 | unsigned long xfer_mask; |
2667 | int i; | 2909 | int i; |
2668 | 2910 | ||
2669 | xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask, | 2911 | xfer_mask = ata_pack_xfermask(ap->pio_mask, |
2670 | ap->udma_mask); | 2912 | ap->mwdma_mask, ap->udma_mask); |
2913 | |||
2914 | /* Apply cable rule here. Don't apply it early because when | ||
2915 | * we handle hot plug the cable type can itself change. | ||
2916 | */ | ||
2917 | if (ap->cbl == ATA_CBL_PATA40) | ||
2918 | xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); | ||
2671 | 2919 | ||
2672 | /* FIXME: Use port-wide xfermask for now */ | 2920 | /* FIXME: Use port-wide xfermask for now */ |
2673 | for (i = 0; i < ATA_MAX_DEVICES; i++) { | 2921 | for (i = 0; i < ATA_MAX_DEVICES; i++) { |
2674 | struct ata_device *d = &ap->device[i]; | 2922 | struct ata_device *d = &ap->device[i]; |
2675 | if (!ata_dev_present(d)) | 2923 | |
2924 | if (ata_dev_absent(d)) | ||
2676 | continue; | 2925 | continue; |
2677 | xfer_mask &= ata_pack_xfermask(d->pio_mask, d->mwdma_mask, | 2926 | |
2678 | d->udma_mask); | 2927 | if (ata_dev_disabled(d)) { |
2928 | /* to avoid violating device selection timing */ | ||
2929 | xfer_mask &= ata_pack_xfermask(d->pio_mask, | ||
2930 | UINT_MAX, UINT_MAX); | ||
2931 | continue; | ||
2932 | } | ||
2933 | |||
2934 | xfer_mask &= ata_pack_xfermask(d->pio_mask, | ||
2935 | d->mwdma_mask, d->udma_mask); | ||
2679 | xfer_mask &= ata_id_xfermask(d->id); | 2936 | xfer_mask &= ata_id_xfermask(d->id); |
2680 | if (ata_dma_blacklisted(d)) | 2937 | if (ata_dma_blacklisted(d)) |
2681 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); | 2938 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); |
2682 | /* Apply cable rule here. Don't apply it early because when | ||
2683 | we handle hot plug the cable type can itself change */ | ||
2684 | if (ap->cbl == ATA_CBL_PATA40) | ||
2685 | xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA); | ||
2686 | } | 2939 | } |
2687 | 2940 | ||
2688 | if (ata_dma_blacklisted(dev)) | 2941 | if (ata_dma_blacklisted(dev)) |
@@ -2693,11 +2946,12 @@ static void ata_dev_xfermask(struct ata_port *ap, struct ata_device *dev) | |||
2693 | if (hs->simplex_claimed) | 2946 | if (hs->simplex_claimed) |
2694 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); | 2947 | xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA); |
2695 | } | 2948 | } |
2949 | |||
2696 | if (ap->ops->mode_filter) | 2950 | if (ap->ops->mode_filter) |
2697 | xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask); | 2951 | xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask); |
2698 | 2952 | ||
2699 | ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask, | 2953 | ata_unpack_xfermask(xfer_mask, &dev->pio_mask, |
2700 | &dev->udma_mask); | 2954 | &dev->mwdma_mask, &dev->udma_mask); |
2701 | } | 2955 | } |
2702 | 2956 | ||
2703 | /** | 2957 | /** |
@@ -2731,7 +2985,7 @@ static unsigned int ata_dev_set_xfermode(struct ata_port *ap, | |||
2731 | tf.protocol = ATA_PROT_NODATA; | 2985 | tf.protocol = ATA_PROT_NODATA; |
2732 | tf.nsect = dev->xfer_mode; | 2986 | tf.nsect = dev->xfer_mode; |
2733 | 2987 | ||
2734 | err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); | 2988 | err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0); |
2735 | 2989 | ||
2736 | DPRINTK("EXIT, err_mask=%x\n", err_mask); | 2990 | DPRINTK("EXIT, err_mask=%x\n", err_mask); |
2737 | return err_mask; | 2991 | return err_mask; |
@@ -2771,7 +3025,7 @@ static unsigned int ata_dev_init_params(struct ata_port *ap, | |||
2771 | tf.nsect = sectors; | 3025 | tf.nsect = sectors; |
2772 | tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ | 3026 | tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */ |
2773 | 3027 | ||
2774 | err_mask = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); | 3028 | err_mask = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0); |
2775 | 3029 | ||
2776 | DPRINTK("EXIT, err_mask=%x\n", err_mask); | 3030 | DPRINTK("EXIT, err_mask=%x\n", err_mask); |
2777 | return err_mask; | 3031 | return err_mask; |
@@ -3159,7 +3413,7 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc) | |||
3159 | 3413 | ||
3160 | /** | 3414 | /** |
3161 | * ata_pio_poll - poll using PIO, depending on current state | 3415 | * ata_pio_poll - poll using PIO, depending on current state |
3162 | * @ap: the target ata_port | 3416 | * @qc: qc in progress |
3163 | * | 3417 | * |
3164 | * LOCKING: | 3418 | * LOCKING: |
3165 | * None. (executing in kernel thread context) | 3419 | * None. (executing in kernel thread context) |
@@ -3167,17 +3421,13 @@ void ata_poll_qc_complete(struct ata_queued_cmd *qc) | |||
3167 | * RETURNS: | 3421 | * RETURNS: |
3168 | * timeout value to use | 3422 | * timeout value to use |
3169 | */ | 3423 | */ |
3170 | 3424 | static unsigned long ata_pio_poll(struct ata_queued_cmd *qc) | |
3171 | static unsigned long ata_pio_poll(struct ata_port *ap) | ||
3172 | { | 3425 | { |
3173 | struct ata_queued_cmd *qc; | 3426 | struct ata_port *ap = qc->ap; |
3174 | u8 status; | 3427 | u8 status; |
3175 | unsigned int poll_state = HSM_ST_UNKNOWN; | 3428 | unsigned int poll_state = HSM_ST_UNKNOWN; |
3176 | unsigned int reg_state = HSM_ST_UNKNOWN; | 3429 | unsigned int reg_state = HSM_ST_UNKNOWN; |
3177 | 3430 | ||
3178 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
3179 | WARN_ON(qc == NULL); | ||
3180 | |||
3181 | switch (ap->hsm_task_state) { | 3431 | switch (ap->hsm_task_state) { |
3182 | case HSM_ST: | 3432 | case HSM_ST: |
3183 | case HSM_ST_POLL: | 3433 | case HSM_ST_POLL: |
@@ -3211,7 +3461,7 @@ static unsigned long ata_pio_poll(struct ata_port *ap) | |||
3211 | 3461 | ||
3212 | /** | 3462 | /** |
3213 | * ata_pio_complete - check if drive is busy or idle | 3463 | * ata_pio_complete - check if drive is busy or idle |
3214 | * @ap: the target ata_port | 3464 | * @qc: qc to complete |
3215 | * | 3465 | * |
3216 | * LOCKING: | 3466 | * LOCKING: |
3217 | * None. (executing in kernel thread context) | 3467 | * None. (executing in kernel thread context) |
@@ -3219,10 +3469,9 @@ static unsigned long ata_pio_poll(struct ata_port *ap) | |||
3219 | * RETURNS: | 3469 | * RETURNS: |
3220 | * Non-zero if qc completed, zero otherwise. | 3470 | * Non-zero if qc completed, zero otherwise. |
3221 | */ | 3471 | */ |
3222 | 3472 | static int ata_pio_complete(struct ata_queued_cmd *qc) | |
3223 | static int ata_pio_complete (struct ata_port *ap) | ||
3224 | { | 3473 | { |
3225 | struct ata_queued_cmd *qc; | 3474 | struct ata_port *ap = qc->ap; |
3226 | u8 drv_stat; | 3475 | u8 drv_stat; |
3227 | 3476 | ||
3228 | /* | 3477 | /* |
@@ -3243,9 +3492,6 @@ static int ata_pio_complete (struct ata_port *ap) | |||
3243 | } | 3492 | } |
3244 | } | 3493 | } |
3245 | 3494 | ||
3246 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
3247 | WARN_ON(qc == NULL); | ||
3248 | |||
3249 | drv_stat = ata_wait_idle(ap); | 3495 | drv_stat = ata_wait_idle(ap); |
3250 | if (!ata_ok(drv_stat)) { | 3496 | if (!ata_ok(drv_stat)) { |
3251 | qc->err_mask |= __ac_err_mask(drv_stat); | 3497 | qc->err_mask |= __ac_err_mask(drv_stat); |
@@ -3581,15 +3827,14 @@ err_out: | |||
3581 | 3827 | ||
3582 | /** | 3828 | /** |
3583 | * ata_pio_block - start PIO on a block | 3829 | * ata_pio_block - start PIO on a block |
3584 | * @ap: the target ata_port | 3830 | * @qc: qc to transfer block for |
3585 | * | 3831 | * |
3586 | * LOCKING: | 3832 | * LOCKING: |
3587 | * None. (executing in kernel thread context) | 3833 | * None. (executing in kernel thread context) |
3588 | */ | 3834 | */ |
3589 | 3835 | static void ata_pio_block(struct ata_queued_cmd *qc) | |
3590 | static void ata_pio_block(struct ata_port *ap) | ||
3591 | { | 3836 | { |
3592 | struct ata_queued_cmd *qc; | 3837 | struct ata_port *ap = qc->ap; |
3593 | u8 status; | 3838 | u8 status; |
3594 | 3839 | ||
3595 | /* | 3840 | /* |
@@ -3611,9 +3856,6 @@ static void ata_pio_block(struct ata_port *ap) | |||
3611 | } | 3856 | } |
3612 | } | 3857 | } |
3613 | 3858 | ||
3614 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
3615 | WARN_ON(qc == NULL); | ||
3616 | |||
3617 | /* check error */ | 3859 | /* check error */ |
3618 | if (status & (ATA_ERR | ATA_DF)) { | 3860 | if (status & (ATA_ERR | ATA_DF)) { |
3619 | qc->err_mask |= AC_ERR_DEV; | 3861 | qc->err_mask |= AC_ERR_DEV; |
@@ -3642,15 +3884,13 @@ static void ata_pio_block(struct ata_port *ap) | |||
3642 | } | 3884 | } |
3643 | } | 3885 | } |
3644 | 3886 | ||
3645 | static void ata_pio_error(struct ata_port *ap) | 3887 | static void ata_pio_error(struct ata_queued_cmd *qc) |
3646 | { | 3888 | { |
3647 | struct ata_queued_cmd *qc; | 3889 | struct ata_port *ap = qc->ap; |
3648 | |||
3649 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
3650 | WARN_ON(qc == NULL); | ||
3651 | 3890 | ||
3652 | if (qc->tf.command != ATA_CMD_PACKET) | 3891 | if (qc->tf.command != ATA_CMD_PACKET) |
3653 | printk(KERN_WARNING "ata%u: PIO error\n", ap->id); | 3892 | printk(KERN_WARNING "ata%u: dev %u PIO error\n", |
3893 | ap->id, qc->dev->devno); | ||
3654 | 3894 | ||
3655 | /* make sure qc->err_mask is available to | 3895 | /* make sure qc->err_mask is available to |
3656 | * know what's wrong and recover | 3896 | * know what's wrong and recover |
@@ -3664,7 +3904,8 @@ static void ata_pio_error(struct ata_port *ap) | |||
3664 | 3904 | ||
3665 | static void ata_pio_task(void *_data) | 3905 | static void ata_pio_task(void *_data) |
3666 | { | 3906 | { |
3667 | struct ata_port *ap = _data; | 3907 | struct ata_queued_cmd *qc = _data; |
3908 | struct ata_port *ap = qc->ap; | ||
3668 | unsigned long timeout; | 3909 | unsigned long timeout; |
3669 | int qc_completed; | 3910 | int qc_completed; |
3670 | 3911 | ||
@@ -3677,33 +3918,33 @@ fsm_start: | |||
3677 | return; | 3918 | return; |
3678 | 3919 | ||
3679 | case HSM_ST: | 3920 | case HSM_ST: |
3680 | ata_pio_block(ap); | 3921 | ata_pio_block(qc); |
3681 | break; | 3922 | break; |
3682 | 3923 | ||
3683 | case HSM_ST_LAST: | 3924 | case HSM_ST_LAST: |
3684 | qc_completed = ata_pio_complete(ap); | 3925 | qc_completed = ata_pio_complete(qc); |
3685 | break; | 3926 | break; |
3686 | 3927 | ||
3687 | case HSM_ST_POLL: | 3928 | case HSM_ST_POLL: |
3688 | case HSM_ST_LAST_POLL: | 3929 | case HSM_ST_LAST_POLL: |
3689 | timeout = ata_pio_poll(ap); | 3930 | timeout = ata_pio_poll(qc); |
3690 | break; | 3931 | break; |
3691 | 3932 | ||
3692 | case HSM_ST_TMOUT: | 3933 | case HSM_ST_TMOUT: |
3693 | case HSM_ST_ERR: | 3934 | case HSM_ST_ERR: |
3694 | ata_pio_error(ap); | 3935 | ata_pio_error(qc); |
3695 | return; | 3936 | return; |
3696 | } | 3937 | } |
3697 | 3938 | ||
3698 | if (timeout) | 3939 | if (timeout) |
3699 | ata_port_queue_task(ap, ata_pio_task, ap, timeout); | 3940 | ata_port_queue_task(ap, ata_pio_task, qc, timeout); |
3700 | else if (!qc_completed) | 3941 | else if (!qc_completed) |
3701 | goto fsm_start; | 3942 | goto fsm_start; |
3702 | } | 3943 | } |
3703 | 3944 | ||
3704 | /** | 3945 | /** |
3705 | * atapi_packet_task - Write CDB bytes to hardware | 3946 | * atapi_packet_task - Write CDB bytes to hardware |
3706 | * @_data: Port to which ATAPI device is attached. | 3947 | * @_data: qc in progress |
3707 | * | 3948 | * |
3708 | * When device has indicated its readiness to accept | 3949 | * When device has indicated its readiness to accept |
3709 | * a CDB, this function is called. Send the CDB. | 3950 | * a CDB, this function is called. Send the CDB. |
@@ -3714,17 +3955,12 @@ fsm_start: | |||
3714 | * LOCKING: | 3955 | * LOCKING: |
3715 | * Kernel thread context (may sleep) | 3956 | * Kernel thread context (may sleep) |
3716 | */ | 3957 | */ |
3717 | |||
3718 | static void atapi_packet_task(void *_data) | 3958 | static void atapi_packet_task(void *_data) |
3719 | { | 3959 | { |
3720 | struct ata_port *ap = _data; | 3960 | struct ata_queued_cmd *qc = _data; |
3721 | struct ata_queued_cmd *qc; | 3961 | struct ata_port *ap = qc->ap; |
3722 | u8 status; | 3962 | u8 status; |
3723 | 3963 | ||
3724 | qc = ata_qc_from_tag(ap, ap->active_tag); | ||
3725 | WARN_ON(qc == NULL); | ||
3726 | WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE)); | ||
3727 | |||
3728 | /* sleep-wait for BSY to clear */ | 3964 | /* sleep-wait for BSY to clear */ |
3729 | DPRINTK("busy wait\n"); | 3965 | DPRINTK("busy wait\n"); |
3730 | if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { | 3966 | if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB)) { |
@@ -3764,7 +4000,7 @@ static void atapi_packet_task(void *_data) | |||
3764 | 4000 | ||
3765 | /* PIO commands are handled by polling */ | 4001 | /* PIO commands are handled by polling */ |
3766 | ap->hsm_task_state = HSM_ST; | 4002 | ap->hsm_task_state = HSM_ST; |
3767 | ata_port_queue_task(ap, ata_pio_task, ap, 0); | 4003 | ata_port_queue_task(ap, ata_pio_task, qc, 0); |
3768 | } | 4004 | } |
3769 | 4005 | ||
3770 | return; | 4006 | return; |
@@ -3774,99 +4010,6 @@ err_out: | |||
3774 | } | 4010 | } |
3775 | 4011 | ||
3776 | /** | 4012 | /** |
3777 | * ata_qc_timeout - Handle timeout of queued command | ||
3778 | * @qc: Command that timed out | ||
3779 | * | ||
3780 | * Some part of the kernel (currently, only the SCSI layer) | ||
3781 | * has noticed that the active command on port @ap has not | ||
3782 | * completed after a specified length of time. Handle this | ||
3783 | * condition by disabling DMA (if necessary) and completing | ||
3784 | * transactions, with error if necessary. | ||
3785 | * | ||
3786 | * This also handles the case of the "lost interrupt", where | ||
3787 | * for some reason (possibly hardware bug, possibly driver bug) | ||
3788 | * an interrupt was not delivered to the driver, even though the | ||
3789 | * transaction completed successfully. | ||
3790 | * | ||
3791 | * LOCKING: | ||
3792 | * Inherited from SCSI layer (none, can sleep) | ||
3793 | */ | ||
3794 | |||
3795 | static void ata_qc_timeout(struct ata_queued_cmd *qc) | ||
3796 | { | ||
3797 | struct ata_port *ap = qc->ap; | ||
3798 | struct ata_host_set *host_set = ap->host_set; | ||
3799 | u8 host_stat = 0, drv_stat; | ||
3800 | unsigned long flags; | ||
3801 | |||
3802 | DPRINTK("ENTER\n"); | ||
3803 | |||
3804 | ap->hsm_task_state = HSM_ST_IDLE; | ||
3805 | |||
3806 | spin_lock_irqsave(&host_set->lock, flags); | ||
3807 | |||
3808 | switch (qc->tf.protocol) { | ||
3809 | |||
3810 | case ATA_PROT_DMA: | ||
3811 | case ATA_PROT_ATAPI_DMA: | ||
3812 | host_stat = ap->ops->bmdma_status(ap); | ||
3813 | |||
3814 | /* before we do anything else, clear DMA-Start bit */ | ||
3815 | ap->ops->bmdma_stop(qc); | ||
3816 | |||
3817 | /* fall through */ | ||
3818 | |||
3819 | default: | ||
3820 | ata_altstatus(ap); | ||
3821 | drv_stat = ata_chk_status(ap); | ||
3822 | |||
3823 | /* ack bmdma irq events */ | ||
3824 | ap->ops->irq_clear(ap); | ||
3825 | |||
3826 | printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n", | ||
3827 | ap->id, qc->tf.command, drv_stat, host_stat); | ||
3828 | |||
3829 | /* complete taskfile transaction */ | ||
3830 | qc->err_mask |= ac_err_mask(drv_stat); | ||
3831 | break; | ||
3832 | } | ||
3833 | |||
3834 | spin_unlock_irqrestore(&host_set->lock, flags); | ||
3835 | |||
3836 | ata_eh_qc_complete(qc); | ||
3837 | |||
3838 | DPRINTK("EXIT\n"); | ||
3839 | } | ||
3840 | |||
3841 | /** | ||
3842 | * ata_eng_timeout - Handle timeout of queued command | ||
3843 | * @ap: Port on which timed-out command is active | ||
3844 | * | ||
3845 | * Some part of the kernel (currently, only the SCSI layer) | ||
3846 | * has noticed that the active command on port @ap has not | ||
3847 | * completed after a specified length of time. Handle this | ||
3848 | * condition by disabling DMA (if necessary) and completing | ||
3849 | * transactions, with error if necessary. | ||
3850 | * | ||
3851 | * This also handles the case of the "lost interrupt", where | ||
3852 | * for some reason (possibly hardware bug, possibly driver bug) | ||
3853 | * an interrupt was not delivered to the driver, even though the | ||
3854 | * transaction completed successfully. | ||
3855 | * | ||
3856 | * LOCKING: | ||
3857 | * Inherited from SCSI layer (none, can sleep) | ||
3858 | */ | ||
3859 | |||
3860 | void ata_eng_timeout(struct ata_port *ap) | ||
3861 | { | ||
3862 | DPRINTK("ENTER\n"); | ||
3863 | |||
3864 | ata_qc_timeout(ata_qc_from_tag(ap, ap->active_tag)); | ||
3865 | |||
3866 | DPRINTK("EXIT\n"); | ||
3867 | } | ||
3868 | |||
3869 | /** | ||
3870 | * ata_qc_new - Request an available ATA command, for queueing | 4013 | * ata_qc_new - Request an available ATA command, for queueing |
3871 | * @ap: Port associated with device @dev | 4014 | * @ap: Port associated with device @dev |
3872 | * @dev: Device from whom we request an available command structure | 4015 | * @dev: Device from whom we request an available command structure |
@@ -3997,15 +4140,14 @@ static inline int ata_should_dma_map(struct ata_queued_cmd *qc) | |||
3997 | * | 4140 | * |
3998 | * LOCKING: | 4141 | * LOCKING: |
3999 | * spin_lock_irqsave(host_set lock) | 4142 | * spin_lock_irqsave(host_set lock) |
4000 | * | ||
4001 | * RETURNS: | ||
4002 | * Zero on success, AC_ERR_* mask on failure | ||
4003 | */ | 4143 | */ |
4004 | 4144 | void ata_qc_issue(struct ata_queued_cmd *qc) | |
4005 | unsigned int ata_qc_issue(struct ata_queued_cmd *qc) | ||
4006 | { | 4145 | { |
4007 | struct ata_port *ap = qc->ap; | 4146 | struct ata_port *ap = qc->ap; |
4008 | 4147 | ||
4148 | qc->ap->active_tag = qc->tag; | ||
4149 | qc->flags |= ATA_QCFLAG_ACTIVE; | ||
4150 | |||
4009 | if (ata_should_dma_map(qc)) { | 4151 | if (ata_should_dma_map(qc)) { |
4010 | if (qc->flags & ATA_QCFLAG_SG) { | 4152 | if (qc->flags & ATA_QCFLAG_SG) { |
4011 | if (ata_sg_setup(qc)) | 4153 | if (ata_sg_setup(qc)) |
@@ -4020,17 +4162,18 @@ unsigned int ata_qc_issue(struct ata_queued_cmd *qc) | |||
4020 | 4162 | ||
4021 | ap->ops->qc_prep(qc); | 4163 | ap->ops->qc_prep(qc); |
4022 | 4164 | ||
4023 | qc->ap->active_tag = qc->tag; | 4165 | qc->err_mask |= ap->ops->qc_issue(qc); |
4024 | qc->flags |= ATA_QCFLAG_ACTIVE; | 4166 | if (unlikely(qc->err_mask)) |
4025 | 4167 | goto err; | |
4026 | return ap->ops->qc_issue(qc); | 4168 | return; |
4027 | 4169 | ||
4028 | sg_err: | 4170 | sg_err: |
4029 | qc->flags &= ~ATA_QCFLAG_DMAMAP; | 4171 | qc->flags &= ~ATA_QCFLAG_DMAMAP; |
4030 | return AC_ERR_SYSTEM; | 4172 | qc->err_mask |= AC_ERR_SYSTEM; |
4173 | err: | ||
4174 | ata_qc_complete(qc); | ||
4031 | } | 4175 | } |
4032 | 4176 | ||
4033 | |||
4034 | /** | 4177 | /** |
4035 | * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner | 4178 | * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner |
4036 | * @qc: command to issue to device | 4179 | * @qc: command to issue to device |
@@ -4070,26 +4213,26 @@ unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc) | |||
4070 | ata_qc_set_polling(qc); | 4213 | ata_qc_set_polling(qc); |
4071 | ata_tf_to_host(ap, &qc->tf); | 4214 | ata_tf_to_host(ap, &qc->tf); |
4072 | ap->hsm_task_state = HSM_ST; | 4215 | ap->hsm_task_state = HSM_ST; |
4073 | ata_port_queue_task(ap, ata_pio_task, ap, 0); | 4216 | ata_port_queue_task(ap, ata_pio_task, qc, 0); |
4074 | break; | 4217 | break; |
4075 | 4218 | ||
4076 | case ATA_PROT_ATAPI: | 4219 | case ATA_PROT_ATAPI: |
4077 | ata_qc_set_polling(qc); | 4220 | ata_qc_set_polling(qc); |
4078 | ata_tf_to_host(ap, &qc->tf); | 4221 | ata_tf_to_host(ap, &qc->tf); |
4079 | ata_port_queue_task(ap, atapi_packet_task, ap, 0); | 4222 | ata_port_queue_task(ap, atapi_packet_task, qc, 0); |
4080 | break; | 4223 | break; |
4081 | 4224 | ||
4082 | case ATA_PROT_ATAPI_NODATA: | 4225 | case ATA_PROT_ATAPI_NODATA: |
4083 | ap->flags |= ATA_FLAG_NOINTR; | 4226 | ap->flags |= ATA_FLAG_NOINTR; |
4084 | ata_tf_to_host(ap, &qc->tf); | 4227 | ata_tf_to_host(ap, &qc->tf); |
4085 | ata_port_queue_task(ap, atapi_packet_task, ap, 0); | 4228 | ata_port_queue_task(ap, atapi_packet_task, qc, 0); |
4086 | break; | 4229 | break; |
4087 | 4230 | ||
4088 | case ATA_PROT_ATAPI_DMA: | 4231 | case ATA_PROT_ATAPI_DMA: |
4089 | ap->flags |= ATA_FLAG_NOINTR; | 4232 | ap->flags |= ATA_FLAG_NOINTR; |
4090 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ | 4233 | ap->ops->tf_load(ap, &qc->tf); /* load tf registers */ |
4091 | ap->ops->bmdma_setup(qc); /* set up bmdma */ | 4234 | ap->ops->bmdma_setup(qc); /* set up bmdma */ |
4092 | ata_port_queue_task(ap, atapi_packet_task, ap, 0); | 4235 | ata_port_queue_task(ap, atapi_packet_task, qc, 0); |
4093 | break; | 4236 | break; |
4094 | 4237 | ||
4095 | default: | 4238 | default: |
@@ -4211,7 +4354,7 @@ irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs) | |||
4211 | 4354 | ||
4212 | ap = host_set->ports[i]; | 4355 | ap = host_set->ports[i]; |
4213 | if (ap && | 4356 | if (ap && |
4214 | !(ap->flags & (ATA_FLAG_PORT_DISABLED | ATA_FLAG_NOINTR))) { | 4357 | !(ap->flags & (ATA_FLAG_DISABLED | ATA_FLAG_NOINTR))) { |
4215 | struct ata_queued_cmd *qc; | 4358 | struct ata_queued_cmd *qc; |
4216 | 4359 | ||
4217 | qc = ata_qc_from_tag(ap, ap->active_tag); | 4360 | qc = ata_qc_from_tag(ap, ap->active_tag); |
@@ -4243,7 +4386,7 @@ static int ata_do_simple_cmd(struct ata_port *ap, struct ata_device *dev, | |||
4243 | tf.flags |= ATA_TFLAG_DEVICE; | 4386 | tf.flags |= ATA_TFLAG_DEVICE; |
4244 | tf.protocol = ATA_PROT_NODATA; | 4387 | tf.protocol = ATA_PROT_NODATA; |
4245 | 4388 | ||
4246 | err = ata_exec_internal(ap, dev, &tf, DMA_NONE, NULL, 0); | 4389 | err = ata_exec_internal(ap, dev, &tf, NULL, DMA_NONE, NULL, 0); |
4247 | if (err) | 4390 | if (err) |
4248 | printk(KERN_ERR "%s: ata command failed: %d\n", | 4391 | printk(KERN_ERR "%s: ata command failed: %d\n", |
4249 | __FUNCTION__, err); | 4392 | __FUNCTION__, err); |
@@ -4289,10 +4432,12 @@ static int ata_start_drive(struct ata_port *ap, struct ata_device *dev) | |||
4289 | int ata_device_resume(struct ata_port *ap, struct ata_device *dev) | 4432 | int ata_device_resume(struct ata_port *ap, struct ata_device *dev) |
4290 | { | 4433 | { |
4291 | if (ap->flags & ATA_FLAG_SUSPENDED) { | 4434 | if (ap->flags & ATA_FLAG_SUSPENDED) { |
4435 | struct ata_device *failed_dev; | ||
4292 | ap->flags &= ~ATA_FLAG_SUSPENDED; | 4436 | ap->flags &= ~ATA_FLAG_SUSPENDED; |
4293 | ata_set_mode(ap); | 4437 | while (ata_set_mode(ap, &failed_dev)) |
4438 | ata_dev_disable(ap, failed_dev); | ||
4294 | } | 4439 | } |
4295 | if (!ata_dev_present(dev)) | 4440 | if (!ata_dev_enabled(dev)) |
4296 | return 0; | 4441 | return 0; |
4297 | if (dev->class == ATA_DEV_ATA) | 4442 | if (dev->class == ATA_DEV_ATA) |
4298 | ata_start_drive(ap, dev); | 4443 | ata_start_drive(ap, dev); |
@@ -4310,7 +4455,7 @@ int ata_device_resume(struct ata_port *ap, struct ata_device *dev) | |||
4310 | */ | 4455 | */ |
4311 | int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state) | 4456 | int ata_device_suspend(struct ata_port *ap, struct ata_device *dev, pm_message_t state) |
4312 | { | 4457 | { |
4313 | if (!ata_dev_present(dev)) | 4458 | if (!ata_dev_enabled(dev)) |
4314 | return 0; | 4459 | return 0; |
4315 | if (dev->class == ATA_DEV_ATA) | 4460 | if (dev->class == ATA_DEV_ATA) |
4316 | ata_flush_cache(ap, dev); | 4461 | ata_flush_cache(ap, dev); |
@@ -4430,7 +4575,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, | |||
4430 | host->unique_id = ata_unique_id++; | 4575 | host->unique_id = ata_unique_id++; |
4431 | host->max_cmd_len = 12; | 4576 | host->max_cmd_len = 12; |
4432 | 4577 | ||
4433 | ap->flags = ATA_FLAG_PORT_DISABLED; | 4578 | ap->flags = ATA_FLAG_DISABLED; |
4434 | ap->id = host->unique_id; | 4579 | ap->id = host->unique_id; |
4435 | ap->host = host; | 4580 | ap->host = host; |
4436 | ap->ctl = ATA_DEVCTL_OBS; | 4581 | ap->ctl = ATA_DEVCTL_OBS; |
@@ -4445,6 +4590,7 @@ static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host, | |||
4445 | ap->flags |= ent->host_flags; | 4590 | ap->flags |= ent->host_flags; |
4446 | ap->ops = ent->port_ops; | 4591 | ap->ops = ent->port_ops; |
4447 | ap->cbl = ATA_CBL_NONE; | 4592 | ap->cbl = ATA_CBL_NONE; |
4593 | ap->sata_spd_limit = UINT_MAX; | ||
4448 | ap->active_tag = ATA_TAG_POISON; | 4594 | ap->active_tag = ATA_TAG_POISON; |
4449 | ap->last_ctl = 0xFF; | 4595 | ap->last_ctl = 0xFF; |
4450 | 4596 | ||
@@ -4505,7 +4651,7 @@ static struct ata_port * ata_host_add(const struct ata_probe_ent *ent, | |||
4505 | 4651 | ||
4506 | host->transportt = &ata_scsi_transport_template; | 4652 | host->transportt = &ata_scsi_transport_template; |
4507 | 4653 | ||
4508 | ap = (struct ata_port *) &host->hostdata[0]; | 4654 | ap = ata_shost_to_port(host); |
4509 | 4655 | ||
4510 | ata_host_init(ap, host, host_set, ent, port_no); | 4656 | ata_host_init(ap, host, host_set, ent, port_no); |
4511 | 4657 | ||
@@ -4718,7 +4864,7 @@ void ata_host_set_remove(struct ata_host_set *host_set) | |||
4718 | 4864 | ||
4719 | int ata_scsi_release(struct Scsi_Host *host) | 4865 | int ata_scsi_release(struct Scsi_Host *host) |
4720 | { | 4866 | { |
4721 | struct ata_port *ap = (struct ata_port *) &host->hostdata[0]; | 4867 | struct ata_port *ap = ata_shost_to_port(host); |
4722 | int i; | 4868 | int i; |
4723 | 4869 | ||
4724 | DPRINTK("ENTER\n"); | 4870 | DPRINTK("ENTER\n"); |
@@ -4885,6 +5031,52 @@ int ata_ratelimit(void) | |||
4885 | return rc; | 5031 | return rc; |
4886 | } | 5032 | } |
4887 | 5033 | ||
5034 | /** | ||
5035 | * ata_wait_register - wait until register value changes | ||
5036 | * @reg: IO-mapped register | ||
5037 | * @mask: Mask to apply to read register value | ||
5038 | * @val: Wait condition | ||
5039 | * @interval_msec: polling interval in milliseconds | ||
5040 | * @timeout_msec: timeout in milliseconds | ||
5041 | * | ||
5042 | * Waiting for some bits of register to change is a common | ||
5043 | * operation for ATA controllers. This function reads 32bit LE | ||
5044 | * IO-mapped register @reg and tests for the following condition. | ||
5045 | * | ||
5046 | * (*@reg & mask) != val | ||
5047 | * | ||
5048 | * If the condition is met, it returns; otherwise, the process is | ||
5049 | * repeated after @interval_msec until timeout. | ||
5050 | * | ||
5051 | * LOCKING: | ||
5052 | * Kernel thread context (may sleep) | ||
5053 | * | ||
5054 | * RETURNS: | ||
5055 | * The final register value. | ||
5056 | */ | ||
5057 | u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val, | ||
5058 | unsigned long interval_msec, | ||
5059 | unsigned long timeout_msec) | ||
5060 | { | ||
5061 | unsigned long timeout; | ||
5062 | u32 tmp; | ||
5063 | |||
5064 | tmp = ioread32(reg); | ||
5065 | |||
5066 | /* Calculate timeout _after_ the first read to make sure | ||
5067 | * preceding writes reach the controller before starting to | ||
5068 | * eat away the timeout. | ||
5069 | */ | ||
5070 | timeout = jiffies + (timeout_msec * HZ) / 1000; | ||
5071 | |||
5072 | while ((tmp & mask) == val && time_before(jiffies, timeout)) { | ||
5073 | msleep(interval_msec); | ||
5074 | tmp = ioread32(reg); | ||
5075 | } | ||
5076 | |||
5077 | return tmp; | ||
5078 | } | ||
5079 | |||
4888 | /* | 5080 | /* |
4889 | * libata is essentially a library of internal helper functions for | 5081 | * libata is essentially a library of internal helper functions for |
4890 | * low-level ATA host controller drivers. As such, the API/ABI is | 5082 | * low-level ATA host controller drivers. As such, the API/ABI is |
@@ -4900,7 +5092,6 @@ EXPORT_SYMBOL_GPL(ata_sg_init); | |||
4900 | EXPORT_SYMBOL_GPL(ata_sg_init_one); | 5092 | EXPORT_SYMBOL_GPL(ata_sg_init_one); |
4901 | EXPORT_SYMBOL_GPL(__ata_qc_complete); | 5093 | EXPORT_SYMBOL_GPL(__ata_qc_complete); |
4902 | EXPORT_SYMBOL_GPL(ata_qc_issue_prot); | 5094 | EXPORT_SYMBOL_GPL(ata_qc_issue_prot); |
4903 | EXPORT_SYMBOL_GPL(ata_eng_timeout); | ||
4904 | EXPORT_SYMBOL_GPL(ata_tf_load); | 5095 | EXPORT_SYMBOL_GPL(ata_tf_load); |
4905 | EXPORT_SYMBOL_GPL(ata_tf_read); | 5096 | EXPORT_SYMBOL_GPL(ata_tf_read); |
4906 | EXPORT_SYMBOL_GPL(ata_noop_dev_select); | 5097 | EXPORT_SYMBOL_GPL(ata_noop_dev_select); |
@@ -4922,6 +5113,7 @@ EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear); | |||
4922 | EXPORT_SYMBOL_GPL(ata_bmdma_status); | 5113 | EXPORT_SYMBOL_GPL(ata_bmdma_status); |
4923 | EXPORT_SYMBOL_GPL(ata_bmdma_stop); | 5114 | EXPORT_SYMBOL_GPL(ata_bmdma_stop); |
4924 | EXPORT_SYMBOL_GPL(ata_port_probe); | 5115 | EXPORT_SYMBOL_GPL(ata_port_probe); |
5116 | EXPORT_SYMBOL_GPL(ata_set_sata_spd); | ||
4925 | EXPORT_SYMBOL_GPL(sata_phy_reset); | 5117 | EXPORT_SYMBOL_GPL(sata_phy_reset); |
4926 | EXPORT_SYMBOL_GPL(__sata_phy_reset); | 5118 | EXPORT_SYMBOL_GPL(__sata_phy_reset); |
4927 | EXPORT_SYMBOL_GPL(ata_bus_reset); | 5119 | EXPORT_SYMBOL_GPL(ata_bus_reset); |
@@ -4936,19 +5128,17 @@ EXPORT_SYMBOL_GPL(ata_dev_classify); | |||
4936 | EXPORT_SYMBOL_GPL(ata_dev_pair); | 5128 | EXPORT_SYMBOL_GPL(ata_dev_pair); |
4937 | EXPORT_SYMBOL_GPL(ata_port_disable); | 5129 | EXPORT_SYMBOL_GPL(ata_port_disable); |
4938 | EXPORT_SYMBOL_GPL(ata_ratelimit); | 5130 | EXPORT_SYMBOL_GPL(ata_ratelimit); |
5131 | EXPORT_SYMBOL_GPL(ata_wait_register); | ||
4939 | EXPORT_SYMBOL_GPL(ata_busy_sleep); | 5132 | EXPORT_SYMBOL_GPL(ata_busy_sleep); |
4940 | EXPORT_SYMBOL_GPL(ata_port_queue_task); | 5133 | EXPORT_SYMBOL_GPL(ata_port_queue_task); |
4941 | EXPORT_SYMBOL_GPL(ata_scsi_ioctl); | 5134 | EXPORT_SYMBOL_GPL(ata_scsi_ioctl); |
4942 | EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); | 5135 | EXPORT_SYMBOL_GPL(ata_scsi_queuecmd); |
4943 | EXPORT_SYMBOL_GPL(ata_scsi_error); | ||
4944 | EXPORT_SYMBOL_GPL(ata_scsi_slave_config); | 5136 | EXPORT_SYMBOL_GPL(ata_scsi_slave_config); |
4945 | EXPORT_SYMBOL_GPL(ata_scsi_release); | 5137 | EXPORT_SYMBOL_GPL(ata_scsi_release); |
4946 | EXPORT_SYMBOL_GPL(ata_host_intr); | 5138 | EXPORT_SYMBOL_GPL(ata_host_intr); |
4947 | EXPORT_SYMBOL_GPL(ata_id_string); | 5139 | EXPORT_SYMBOL_GPL(ata_id_string); |
4948 | EXPORT_SYMBOL_GPL(ata_id_c_string); | 5140 | EXPORT_SYMBOL_GPL(ata_id_c_string); |
4949 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); | 5141 | EXPORT_SYMBOL_GPL(ata_scsi_simulate); |
4950 | EXPORT_SYMBOL_GPL(ata_eh_qc_complete); | ||
4951 | EXPORT_SYMBOL_GPL(ata_eh_qc_retry); | ||
4952 | 5142 | ||
4953 | EXPORT_SYMBOL_GPL(ata_pio_need_iordy); | 5143 | EXPORT_SYMBOL_GPL(ata_pio_need_iordy); |
4954 | EXPORT_SYMBOL_GPL(ata_timing_compute); | 5144 | EXPORT_SYMBOL_GPL(ata_timing_compute); |
@@ -4970,3 +5160,8 @@ EXPORT_SYMBOL_GPL(ata_device_suspend); | |||
4970 | EXPORT_SYMBOL_GPL(ata_device_resume); | 5160 | EXPORT_SYMBOL_GPL(ata_device_resume); |
4971 | EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); | 5161 | EXPORT_SYMBOL_GPL(ata_scsi_device_suspend); |
4972 | EXPORT_SYMBOL_GPL(ata_scsi_device_resume); | 5162 | EXPORT_SYMBOL_GPL(ata_scsi_device_resume); |
5163 | |||
5164 | EXPORT_SYMBOL_GPL(ata_scsi_error); | ||
5165 | EXPORT_SYMBOL_GPL(ata_eng_timeout); | ||
5166 | EXPORT_SYMBOL_GPL(ata_eh_qc_complete); | ||
5167 | EXPORT_SYMBOL_GPL(ata_eh_qc_retry); | ||