Diffstat (limited to 'drivers/ata')
-rw-r--r--  drivers/ata/Kconfig         |  10
-rw-r--r--  drivers/ata/ahci.c          | 191
-rw-r--r--  drivers/ata/ata_piix.c      |  16
-rw-r--r--  drivers/ata/libata-acpi.c   | 165
-rw-r--r--  drivers/ata/libata-core.c   |   6
-rw-r--r--  drivers/ata/libata-pmp.c    |   7
-rw-r--r--  drivers/ata/libata-scsi.c   |  19
-rw-r--r--  drivers/ata/libata-sff.c    | 145
-rw-r--r--  drivers/ata/libata.h        |   2
-rw-r--r--  drivers/ata/pata_icside.c   |   2
-rw-r--r--  drivers/ata/pata_pcmcia.c   |   2
-rw-r--r--  drivers/ata/pata_rb532_cf.c |   4
-rw-r--r--  drivers/ata/pata_scc.c      |   5
-rw-r--r--  drivers/ata/sata_fsl.c      | 224
-rw-r--r--  drivers/ata/sata_mv.c       | 122
-rw-r--r--  drivers/ata/sata_sil24.c    |   1
-rw-r--r--  drivers/ata/sata_uli.c      |   1
17 files changed, 670 insertions, 252 deletions
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 9bf2986a2788..ae8494944c45 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -651,9 +651,17 @@ config PATA_WINBOND_VLB
 	  Support for the Winbond W83759A controller on Vesa Local Bus
 	  systems.
 
+config HAVE_PATA_PLATFORM
+	bool
+	help
+	  This is an internal configuration node for any machine that
+	  uses pata-platform driver to enable the relevant driver in the
+	  configuration structure without having to submit endless patches
+	  to update the PATA_PLATFORM entry.
+
 config PATA_PLATFORM
 	tristate "Generic platform device PATA support"
-	depends on EMBEDDED || ARCH_RPC || PPC
+	depends on EMBEDDED || ARCH_RPC || PPC || HAVE_PATA_PLATFORM
 	help
 	  This option enables support for generic directly connected ATA
 	  devices commonly found on embedded systems.
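HAVE_PATA_PLATFORM is not meant to be set by the user; a platform that provides pata_platform resources is expected to select it from its own Kconfig, which then makes PATA_PLATFORM visible there without further edits to this file. A minimal sketch of how a machine would use it (ARCH_FOO is a made-up example symbol, not part of this patch):

config ARCH_FOO
	bool "Foo reference board"
	select HAVE_PATA_PLATFORM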
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 97f83fb2ee2e..5e6468a7ca4b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -89,6 +89,8 @@ enum {
 	board_ahci_sb600 = 3,
 	board_ahci_mv = 4,
 	board_ahci_sb700 = 5,
+	board_ahci_mcp65 = 6,
+	board_ahci_nopmp = 7,
 
 	/* global controller registers */
 	HOST_CAP = 0x00, /* host capabilities */
@@ -190,6 +192,7 @@ enum {
 	AHCI_HFLAG_NO_PMP = (1 << 6), /* no PMP */
 	AHCI_HFLAG_NO_HOTPLUG = (1 << 7), /* ignore PxSERR.DIAG.N */
 	AHCI_HFLAG_SECT255 = (1 << 8), /* max 255 sectors */
+	AHCI_HFLAG_YES_NCQ = (1 << 9), /* force NCQ cap on */
 
 	/* ap->flags bits */
 
@@ -253,6 +256,8 @@ static void ahci_pmp_attach(struct ata_port *ap);
 static void ahci_pmp_detach(struct ata_port *ap);
 static int ahci_softreset(struct ata_link *link, unsigned int *class,
 			  unsigned long deadline);
+static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline);
 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
 			  unsigned long deadline);
 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
@@ -329,6 +334,12 @@ static struct ata_port_operations ahci_p5wdh_ops = {
 	.hardreset = ahci_p5wdh_hardreset,
 };
 
+static struct ata_port_operations ahci_sb600_ops = {
+	.inherits = &ahci_ops,
+	.softreset = ahci_sb600_softreset,
+	.pmp_softreset = ahci_sb600_softreset,
+};
+
 #define AHCI_HFLAGS(flags) .private_data = (void *)(flags)
 
 static const struct ata_port_info ahci_port_info[] = {
@@ -359,11 +370,11 @@ static const struct ata_port_info ahci_port_info[] = {
 	{
 		AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
 			     AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
-			     AHCI_HFLAG_SECT255 | AHCI_HFLAG_NO_PMP),
+			     AHCI_HFLAG_SECT255),
 		.flags = AHCI_FLAG_COMMON,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = ATA_UDMA6,
-		.port_ops = &ahci_ops,
+		.port_ops = &ahci_sb600_ops,
 	},
 	/* board_ahci_mv */
 	{
@@ -377,8 +388,23 @@ static const struct ata_port_info ahci_port_info[] = {
 	},
 	/* board_ahci_sb700 */
 	{
-		AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL |
-			     AHCI_HFLAG_NO_PMP),
+		AHCI_HFLAGS (AHCI_HFLAG_IGN_SERR_INTERNAL),
+		.flags = AHCI_FLAG_COMMON,
+		.pio_mask = 0x1f, /* pio0-4 */
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &ahci_sb600_ops,
+	},
+	/* board_ahci_mcp65 */
+	{
+		AHCI_HFLAGS (AHCI_HFLAG_YES_NCQ),
+		.flags = AHCI_FLAG_COMMON,
+		.pio_mask = 0x1f, /* pio0-4 */
+		.udma_mask = ATA_UDMA6,
+		.port_ops = &ahci_ops,
+	},
+	/* board_ahci_nopmp */
+	{
+		AHCI_HFLAGS (AHCI_HFLAG_NO_PMP),
 		.flags = AHCI_FLAG_COMMON,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = ATA_UDMA6,
@@ -438,14 +464,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(VIA, 0x6287), board_ahci_vt8251 }, /* VIA VT8251 */
 
 	/* NVIDIA */
-	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci }, /* MCP65 */
-	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci }, /* MCP65 */
-	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci }, /* MCP65 */
-	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci }, /* MCP65 */
-	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci }, /* MCP65 */
-	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci }, /* MCP65 */
-	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci }, /* MCP65 */
-	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci }, /* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x044c), board_ahci_mcp65 }, /* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x044d), board_ahci_mcp65 }, /* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x044e), board_ahci_mcp65 }, /* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x044f), board_ahci_mcp65 }, /* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045c), board_ahci_mcp65 }, /* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045d), board_ahci_mcp65 }, /* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045e), board_ahci_mcp65 }, /* MCP65 */
+	{ PCI_VDEVICE(NVIDIA, 0x045f), board_ahci_mcp65 }, /* MCP65 */
 	{ PCI_VDEVICE(NVIDIA, 0x0550), board_ahci }, /* MCP67 */
 	{ PCI_VDEVICE(NVIDIA, 0x0551), board_ahci }, /* MCP67 */
 	{ PCI_VDEVICE(NVIDIA, 0x0552), board_ahci }, /* MCP67 */
@@ -502,15 +528,15 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 	{ PCI_VDEVICE(NVIDIA, 0x0bcd), board_ahci }, /* MCP7B */
 	{ PCI_VDEVICE(NVIDIA, 0x0bce), board_ahci }, /* MCP7B */
 	{ PCI_VDEVICE(NVIDIA, 0x0bcf), board_ahci }, /* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bd0), board_ahci }, /* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bd1), board_ahci }, /* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bd2), board_ahci }, /* MCP7B */
-	{ PCI_VDEVICE(NVIDIA, 0x0bd3), board_ahci }, /* MCP7B */
+	{ PCI_VDEVICE(NVIDIA, 0x0bc4), board_ahci }, /* MCP7B */
+	{ PCI_VDEVICE(NVIDIA, 0x0bc5), board_ahci }, /* MCP7B */
+	{ PCI_VDEVICE(NVIDIA, 0x0bc6), board_ahci }, /* MCP7B */
+	{ PCI_VDEVICE(NVIDIA, 0x0bc7), board_ahci }, /* MCP7B */
 
 	/* SiS */
-	{ PCI_VDEVICE(SI, 0x1184), board_ahci }, /* SiS 966 */
-	{ PCI_VDEVICE(SI, 0x1185), board_ahci }, /* SiS 966 */
-	{ PCI_VDEVICE(SI, 0x0186), board_ahci }, /* SiS 968 */
+	{ PCI_VDEVICE(SI, 0x1184), board_ahci_nopmp }, /* SiS 966 */
+	{ PCI_VDEVICE(SI, 0x1185), board_ahci_nopmp }, /* SiS 968 */
+	{ PCI_VDEVICE(SI, 0x0186), board_ahci_nopmp }, /* SiS 968 */
 
 	/* Marvell */
 	{ PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */
@@ -624,12 +650,26 @@ static void ahci_save_initial_config(struct pci_dev *pdev,
 		cap &= ~HOST_CAP_NCQ;
 	}
 
+	if (!(cap & HOST_CAP_NCQ) && (hpriv->flags & AHCI_HFLAG_YES_NCQ)) {
+		dev_printk(KERN_INFO, &pdev->dev,
+			   "controller can do NCQ, turning on CAP_NCQ\n");
+		cap |= HOST_CAP_NCQ;
+	}
+
 	if ((cap & HOST_CAP_PMP) && (hpriv->flags & AHCI_HFLAG_NO_PMP)) {
 		dev_printk(KERN_INFO, &pdev->dev,
 			   "controller can't do PMP, turning off CAP_PMP\n");
 		cap &= ~HOST_CAP_PMP;
 	}
 
+	if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361 &&
+	    port_map != 1) {
+		dev_printk(KERN_INFO, &pdev->dev,
+			   "JMB361 has only one port, port_map 0x%x -> 0x%x\n",
+			   port_map, 1);
+		port_map = 1;
+	}
+
 	/*
 	 * Temporary Marvell 6145 hack: PATA port presence
 	 * is asserted through the standard AHCI port
@@ -1262,19 +1302,11 @@ static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
 	return 0;
 }
 
-static int ahci_check_ready(struct ata_link *link)
-{
-	void __iomem *port_mmio = ahci_port_base(link->ap);
-	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
-
-	return ata_check_ready(status);
-}
-
-static int ahci_softreset(struct ata_link *link, unsigned int *class,
-			  unsigned long deadline)
+static int ahci_do_softreset(struct ata_link *link, unsigned int *class,
+			     int pmp, unsigned long deadline,
+			     int (*check_ready)(struct ata_link *link))
 {
 	struct ata_port *ap = link->ap;
-	int pmp = sata_srst_pmp(link);
 	const char *reason = NULL;
 	unsigned long now, msecs;
 	struct ata_taskfile tf;
@@ -1312,7 +1344,7 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class,
 	ahci_exec_polled_cmd(ap, pmp, &tf, 0, 0, 0);
 
 	/* wait for link to become ready */
-	rc = ata_wait_after_reset(link, deadline, ahci_check_ready);
+	rc = ata_wait_after_reset(link, deadline, check_ready);
 	/* link occupied, -ENODEV too is an error */
 	if (rc) {
 		reason = "device not ready";
@@ -1328,6 +1360,72 @@ static int ahci_softreset(struct ata_link *link, unsigned int *class,
 	return rc;
 }
 
+static int ahci_check_ready(struct ata_link *link)
+{
+	void __iomem *port_mmio = ahci_port_base(link->ap);
+	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+
+	return ata_check_ready(status);
+}
+
+static int ahci_softreset(struct ata_link *link, unsigned int *class,
+			  unsigned long deadline)
+{
+	int pmp = sata_srst_pmp(link);
+
+	DPRINTK("ENTER\n");
+
+	return ahci_do_softreset(link, class, pmp, deadline, ahci_check_ready);
+}
+
+static int ahci_sb600_check_ready(struct ata_link *link)
+{
+	void __iomem *port_mmio = ahci_port_base(link->ap);
+	u8 status = readl(port_mmio + PORT_TFDATA) & 0xFF;
+	u32 irq_status = readl(port_mmio + PORT_IRQ_STAT);
+
+	/*
+	 * There is no need to check TFDATA if BAD PMP is found due to HW bug,
+	 * which can save timeout delay.
+	 */
+	if (irq_status & PORT_IRQ_BAD_PMP)
+		return -EIO;
+
+	return ata_check_ready(status);
+}
+
+static int ahci_sb600_softreset(struct ata_link *link, unsigned int *class,
+				unsigned long deadline)
+{
+	struct ata_port *ap = link->ap;
+	void __iomem *port_mmio = ahci_port_base(ap);
+	int pmp = sata_srst_pmp(link);
+	int rc;
+	u32 irq_sts;
+
+	DPRINTK("ENTER\n");
+
+	rc = ahci_do_softreset(link, class, pmp, deadline,
+			       ahci_sb600_check_ready);
+
+	/*
+	 * Soft reset fails on some ATI chips with IPMS set when PMP
+	 * is enabled but SATA HDD/ODD is connected to SATA port,
+	 * do soft reset again to port 0.
+	 */
+	if (rc == -EIO) {
+		irq_sts = readl(port_mmio + PORT_IRQ_STAT);
+		if (irq_sts & PORT_IRQ_BAD_PMP) {
+			ata_link_printk(link, KERN_WARNING,
+					"failed due to HW bug, retry pmp=0\n");
+			rc = ahci_do_softreset(link, class, 0, deadline,
+					       ahci_check_ready);
+		}
+	}
+
+	return rc;
+}
+
 static int ahci_hardreset(struct ata_link *link, unsigned int *class,
 			  unsigned long deadline)
 {
@@ -1679,7 +1777,7 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
 	struct ahci_host_priv *hpriv;
 	unsigned int i, handled = 0;
 	void __iomem *mmio;
-	u32 irq_stat, irq_ack = 0;
+	u32 irq_stat, irq_masked;
 
 	VPRINTK("ENTER\n");
 
@@ -1688,16 +1786,17 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
 
 	/* sigh. 0xffffffff is a valid return from h/w */
 	irq_stat = readl(mmio + HOST_IRQ_STAT);
-	irq_stat &= hpriv->port_map;
 	if (!irq_stat)
 		return IRQ_NONE;
 
+	irq_masked = irq_stat & hpriv->port_map;
+
 	spin_lock(&host->lock);
 
 	for (i = 0; i < host->n_ports; i++) {
 		struct ata_port *ap;
 
-		if (!(irq_stat & (1 << i)))
+		if (!(irq_masked & (1 << i)))
 			continue;
 
 		ap = host->ports[i];
@@ -1711,14 +1810,20 @@ static irqreturn_t ahci_interrupt(int irq, void *dev_instance)
 				"interrupt on disabled port %u\n", i);
 		}
 
-		irq_ack |= (1 << i);
-	}
-
-	if (irq_ack) {
-		writel(irq_ack, mmio + HOST_IRQ_STAT);
 		handled = 1;
 	}
 
+	/* HOST_IRQ_STAT behaves as level triggered latch meaning that
+	 * it should be cleared after all the port events are cleared;
+	 * otherwise, it will raise a spurious interrupt after each
+	 * valid one. Please read section 10.6.2 of ahci 1.1 for more
+	 * information.
+	 *
+	 * Also, use the unmasked value to clear interrupt as spurious
+	 * pending event on a dummy port might cause screaming IRQ.
+	 */
+	writel(irq_stat, mmio + HOST_IRQ_STAT);
+
 	spin_unlock(&host->lock);
 
 	VPRINTK("EXIT\n");
@@ -2118,7 +2223,8 @@ static void ahci_p5wdh_workaround(struct ata_host *host)
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 {
 	static int printed_version;
-	struct ata_port_info pi = ahci_port_info[ent->driver_data];
+	unsigned int board_id = ent->driver_data;
+	struct ata_port_info pi = ahci_port_info[board_id];
 	const struct ata_port_info *ppi[] = { &pi, NULL };
 	struct device *dev = &pdev->dev;
 	struct ahci_host_priv *hpriv;
@@ -2167,6 +2273,11 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 		return -ENOMEM;
 	hpriv->flags |= (unsigned long)pi.private_data;
 
+	/* MCP65 revision A1 and A2 can't do MSI */
+	if (board_id == board_ahci_mcp65 &&
+	    (pdev->revision == 0xa1 || pdev->revision == 0xa2))
+		hpriv->flags |= AHCI_HFLAG_NO_MSI;
+
 	if ((hpriv->flags & AHCI_HFLAG_NO_MSI) || pci_enable_msi(pdev))
 		pci_intx(pdev, 1);
 
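One detail of the ahci.c interrupt hunks is easy to lose in diff form: the handler now dispatches only the ports present in port_map (irq_masked) but acknowledges the full, unmasked HOST_IRQ_STAT value once, after all port events are cleared, because the register is a level-triggered latch (AHCI 1.1, section 10.6.2). A condensed sketch of that acknowledge ordering, simplified from the hunks above rather than the driver's literal code:

static irqreturn_t ahci_intr_sketch(struct ata_host *host, void __iomem *mmio)
{
	struct ahci_host_priv *hpriv = host->private_data;
	u32 irq_stat, irq_masked;
	unsigned int i, handled = 0;

	irq_stat = readl(mmio + HOST_IRQ_STAT);	/* raw latch, may include dummy ports */
	if (!irq_stat)
		return IRQ_NONE;

	irq_masked = irq_stat & hpriv->port_map;	/* only service real ports */

	spin_lock(&host->lock);
	for (i = 0; i < host->n_ports; i++) {
		if (irq_masked & (1 << i)) {
			/* per-port handling clears PxIS first ... */
			handled = 1;
		}
	}
	/* ... then clear the global latch last, using the unmasked value */
	writel(irq_stat, mmio + HOST_IRQ_STAT);
	spin_unlock(&host->lock);

	return IRQ_RETVAL(handled);
}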
diff --git a/drivers/ata/ata_piix.c b/drivers/ata/ata_piix.c
index a9027b8fbdd5..a90ae03f56b2 100644
--- a/drivers/ata/ata_piix.c
+++ b/drivers/ata/ata_piix.c
@@ -247,10 +247,11 @@ static const struct pci_device_id piix_pci_tbl[] = {
 	{ 0x8086, 0x2820, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller 2 IDE (ICH8) */
 	{ 0x8086, 0x2825, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata },
-	/* Mobile SATA Controller IDE (ICH8M) */
-	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* Mobile SATA Controller IDE (ICH8M), Apple */
 	{ 0x8086, 0x2828, 0x106b, 0x00a0, 0, 0, ich8m_apple_sata },
+	{ 0x8086, 0x2828, 0x106b, 0x00a1, 0, 0, ich8m_apple_sata },
+	/* Mobile SATA Controller IDE (ICH8M) */
+	{ 0x8086, 0x2828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH9) */
 	{ 0x8086, 0x2920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_sata },
 	/* SATA Controller IDE (ICH9) */
@@ -526,7 +527,7 @@ static struct ata_port_info piix_port_info[] = {
 
 	[ich8m_apple_sata] =
 	{
-		.flags = PIIX_SATA_FLAGS | PIIX_FLAG_SIDPR,
+		.flags = PIIX_SATA_FLAGS,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.mwdma_mask = 0x07, /* mwdma0-2 */
 		.udma_mask = ATA_UDMA6,
@@ -573,6 +574,8 @@ static const struct ich_laptop ich_laptop[] = {
 	{ 0x27DF, 0x1043, 0x1267 },	/* ICH7 on Asus W5F */
 	{ 0x27DF, 0x103C, 0x30A1 },	/* ICH7 on HP Compaq nc2400 */
 	{ 0x24CA, 0x1025, 0x0061 },	/* ICH4 on ACER Aspire 2023WLMi */
+	{ 0x24CA, 0x1025, 0x003d },	/* ICH4 on ACER TM290 */
+	{ 0x266F, 0x1025, 0x0066 },	/* ICH6 on ACER Aspire 1694WLMi */
 	{ 0x2653, 0x1043, 0x82D8 },	/* ICH6M on Asus Eee 701 */
 	/* end marker */
 	{ 0, }
@@ -1040,6 +1043,13 @@ static int piix_broken_suspend(void)
 		},
 	},
 	{
+		.ident = "TECRA M4",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "TECRA M4"),
+		},
+	},
+	{
 		.ident = "TECRA M5",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
diff --git a/drivers/ata/libata-acpi.c b/drivers/ata/libata-acpi.c
index dbf6ca781f66..3ff8b14420d9 100644
--- a/drivers/ata/libata-acpi.c
+++ b/drivers/ata/libata-acpi.c
@@ -118,12 +118,62 @@ static void ata_acpi_associate_ide_port(struct ata_port *ap)
 	ap->pflags |= ATA_PFLAG_INIT_GTM_VALID;
 }
 
-static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device
-				    *dev, u32 event)
+static void ata_acpi_eject_device(acpi_handle handle)
+{
+	struct acpi_object_list arg_list;
+	union acpi_object arg;
+
+	arg_list.count = 1;
+	arg_list.pointer = &arg;
+	arg.type = ACPI_TYPE_INTEGER;
+	arg.integer.value = 1;
+
+	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_EJ0",
+					      &arg_list, NULL)))
+		printk(KERN_ERR "Failed to evaluate _EJ0!\n");
+}
+
+/* @ap and @dev are the same as ata_acpi_handle_hotplug() */
+static void ata_acpi_detach_device(struct ata_port *ap, struct ata_device *dev)
+{
+	if (dev)
+		dev->flags |= ATA_DFLAG_DETACH;
+	else {
+		struct ata_link *tlink;
+		struct ata_device *tdev;
+
+		ata_port_for_each_link(tlink, ap)
+			ata_link_for_each_dev(tdev, tlink)
+				tdev->flags |= ATA_DFLAG_DETACH;
+	}
+
+	ata_port_schedule_eh(ap);
+}
+
+/**
+ * ata_acpi_handle_hotplug - ACPI event handler backend
+ * @ap: ATA port ACPI event occurred
+ * @dev: ATA device ACPI event occurred (can be NULL)
+ * @event: ACPI event which occurred
+ * @is_dock_event: boolean indicating whether the event was a dock one
+ *
+ * All ACPI bay / device realted events end up in this function. If
+ * the event is port-wide @dev is NULL. If the event is specific to a
+ * device, @dev points to it.
+ *
+ * Hotplug (as opposed to unplug) notification is always handled as
+ * port-wide while unplug only kills the target device on device-wide
+ * event.
+ *
+ * LOCKING:
+ * ACPI notify handler context. May sleep.
+ */
+static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device *dev,
+				    u32 event, int is_dock_event)
 {
 	char event_string[12];
 	char *envp[] = { event_string, NULL };
-	struct ata_eh_info *ehi;
+	struct ata_eh_info *ehi = &ap->link.eh_info;
 	struct kobject *kobj = NULL;
 	int wait = 0;
 	unsigned long flags;
@@ -131,87 +181,100 @@ static void ata_acpi_handle_hotplug(struct ata_port *ap, struct ata_device
 	unsigned long sta;
 	acpi_status status;
 
-	if (!ap)
-		ap = dev->link->ap;
-	ehi = &ap->link.eh_info;
-
-	spin_lock_irqsave(ap->lock, flags);
-
-	if (dev)
+	if (dev) {
+		if (dev->sdev)
+			kobj = &dev->sdev->sdev_gendev.kobj;
 		handle = dev->acpi_handle;
-	else
+	} else {
+		kobj = &ap->dev->kobj;
 		handle = ap->acpi_handle;
+	}
 
 	status = acpi_get_handle(handle, "_EJ0", &tmphandle);
-	if (ACPI_FAILURE(status)) {
-		/* This device is not ejectable */
-		spin_unlock_irqrestore(ap->lock, flags);
+	if (ACPI_FAILURE(status))
+		/* This device does not support hotplug */
 		return;
-	}
 
-	status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
-	if (ACPI_FAILURE(status)) {
-		printk ("Unable to determine bay status\n");
-		spin_unlock_irqrestore(ap->lock, flags);
-		return;
-	}
+	spin_lock_irqsave(ap->lock, flags);
 
 	switch (event) {
 	case ACPI_NOTIFY_BUS_CHECK:
 	case ACPI_NOTIFY_DEVICE_CHECK:
 		ata_ehi_push_desc(ehi, "ACPI event");
-		if (!sta) {
-			/* Device has been unplugged */
-			if (dev)
-				dev->flags |= ATA_DFLAG_DETACH;
-			else {
-				struct ata_link *tlink;
-				struct ata_device *tdev;
-
-				ata_port_for_each_link(tlink, ap) {
-					ata_link_for_each_dev(tdev, tlink) {
-						tdev->flags |=
-							ATA_DFLAG_DETACH;
-					}
-				}
-			}
-			ata_port_schedule_eh(ap);
-			wait = 1;
-		} else {
+
+		status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
+		if (ACPI_FAILURE(status)) {
+			ata_port_printk(ap, KERN_ERR,
+				"acpi: failed to determine bay status (0x%x)\n",
+				status);
+			break;
+		}
+
+		if (sta) {
 			ata_ehi_hotplugged(ehi);
 			ata_port_freeze(ap);
+		} else {
+			/* The device has gone - unplug it */
+			ata_acpi_detach_device(ap, dev);
+			wait = 1;
 		}
+		break;
+	case ACPI_NOTIFY_EJECT_REQUEST:
+		ata_ehi_push_desc(ehi, "ACPI event");
+
+		if (!is_dock_event)
+			break;
+
+		/* undock event - immediate unplug */
+		ata_acpi_detach_device(ap, dev);
+		wait = 1;
+		break;
 	}
 
+	/* make sure kobj doesn't go away while ap->lock is released */
+	kobject_get(kobj);
+
 	spin_unlock_irqrestore(ap->lock, flags);
 
-	if (wait)
+	if (wait) {
 		ata_port_wait_eh(ap);
+		ata_acpi_eject_device(handle);
+	}
 
-	if (dev) {
-		if (dev->sdev)
-			kobj = &dev->sdev->sdev_gendev.kobj;
-	} else
-		kobj = &ap->dev->kobj;
-
-	if (kobj) {
+	if (kobj && !is_dock_event) {
 		sprintf(event_string, "BAY_EVENT=%d", event);
 		kobject_uevent_env(kobj, KOBJ_CHANGE, envp);
 	}
+
+	kobject_put(kobj);
+}
+
+static void ata_acpi_dev_notify_dock(acpi_handle handle, u32 event, void *data)
+{
+	struct ata_device *dev = data;
+
+	ata_acpi_handle_hotplug(dev->link->ap, dev, event, 1);
+}
+
+static void ata_acpi_ap_notify_dock(acpi_handle handle, u32 event, void *data)
+{
+	struct ata_port *ap = data;
+
+	ata_acpi_handle_hotplug(ap, NULL, event, 1);
 }
 
 static void ata_acpi_dev_notify(acpi_handle handle, u32 event, void *data)
 {
 	struct ata_device *dev = data;
 
-	ata_acpi_handle_hotplug(NULL, dev, event);
+	ata_acpi_handle_hotplug(dev->link->ap, dev, event, 0);
 }
 
 static void ata_acpi_ap_notify(acpi_handle handle, u32 event, void *data)
 {
 	struct ata_port *ap = data;
 
-	ata_acpi_handle_hotplug(ap, NULL, event);
+	ata_acpi_handle_hotplug(ap, NULL, event, 0);
 }
 
 /**
@@ -252,7 +315,7 @@ void ata_acpi_associate(struct ata_host *host)
 						    ata_acpi_ap_notify, ap);
 			/* we might be on a docking station */
 			register_hotplug_dock_device(ap->acpi_handle,
-						     ata_acpi_ap_notify, ap);
+						     ata_acpi_ap_notify_dock, ap);
 		}
 
 		for (j = 0; j < ata_link_max_devices(&ap->link); j++) {
@@ -264,7 +327,7 @@ void ata_acpi_associate(struct ata_host *host)
 						    ata_acpi_dev_notify, dev);
 				/* we might be on a docking station */
 				register_hotplug_dock_device(dev->acpi_handle,
-						     ata_acpi_dev_notify, dev);
+						     ata_acpi_dev_notify_dock, dev);
 			}
 		}
 	}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 3c89f205c83f..303fc0d2b978 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4297,7 +4297,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
 }
 
 /**
- *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
+ *	atapi_check_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
@@ -4310,7 +4310,7 @@ void ata_sg_clean(struct ata_queued_cmd *qc)
 *	RETURNS: 0 when ATAPI DMA can be used
 *		nonzero otherwise
 */
-int ata_check_atapi_dma(struct ata_queued_cmd *qc)
+int atapi_check_dma(struct ata_queued_cmd *qc)
 {
 	struct ata_port *ap = qc->ap;
 
@@ -5403,7 +5403,7 @@ static void ata_host_stop(struct device *gendev, void *res)
 */
 static void ata_finalize_port_ops(struct ata_port_operations *ops)
 {
-	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+	static DEFINE_SPINLOCK(lock);
 	const struct ata_port_operations *cur;
 	void **begin = (void **)ops;
 	void **end = (void **)&ops->inherits;
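The last hunk also drops the long-deprecated SPIN_LOCK_UNLOCKED initializer in favour of DEFINE_SPINLOCK(), which declares and statically initializes the lock in one step and, among other things, gives lockdep a usable per-lock key. A minimal illustration of the idiom outside this file (names are hypothetical):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);	/* declared and initialized at build time */

static void example_critical_section(void)
{
	unsigned long flags;

	spin_lock_irqsave(&example_lock, flags);
	/* ... touch data shared with interrupt context ... */
	spin_unlock_irqrestore(&example_lock, flags);
}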
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 0f9386d4a5a0..7daf4c0f6216 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -322,9 +322,12 @@ static void sata_pmp_quirks(struct ata_port *ap)
 	if (vendor == 0x1095 && devid == 0x3726) {
 		/* sil3726 quirks */
 		ata_port_for_each_link(link, ap) {
-			/* class code report is unreliable */
+			/* Class code report is unreliable and SRST
+			 * times out under certain configurations.
+			 */
 			if (link->pmp < 5)
-				link->flags |= ATA_LFLAG_ASSUME_ATA;
+				link->flags |= ATA_LFLAG_NO_SRST |
+					       ATA_LFLAG_ASSUME_ATA;
 
 			/* port 5 is for SEMB device and it doesn't like SRST */
 			if (link->pmp == 5)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index aeb6e01d82ce..57a43649a461 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -1637,6 +1637,7 @@ defer:
 
 /**
 *	ata_scsi_rbuf_get - Map response buffer.
+ *	@cmd: SCSI command containing buffer to be mapped.
 *	@flags: unsigned long variable to store irq enable status
 *	@copy_in: copy in from user buffer
 *
@@ -1954,7 +1955,7 @@ static unsigned int ata_msense_ctl_mode(u8 *buf)
 
 /**
 *	ata_msense_rw_recovery - Simulate MODE SENSE r/w error recovery page
- *	@bufp: output buffer
+ *	@buf: output buffer
 *
 *	Generate a generic MODE SENSE r/w error recovery page.
 *
@@ -2342,8 +2343,8 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 {
 	struct scsi_cmnd *scmd = qc->scsicmd;
 	struct ata_device *dev = qc->dev;
-	int using_pio = (dev->flags & ATA_DFLAG_PIO);
 	int nodata = (scmd->sc_data_direction == DMA_NONE);
+	int using_pio = !nodata && (dev->flags & ATA_DFLAG_PIO);
 	unsigned int nbytes;
 
 	memset(qc->cdb, 0, dev->cdb_len);
@@ -2361,7 +2362,7 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 		ata_qc_set_pc_nbytes(qc);
 
 	/* check whether ATAPI DMA is safe */
-	if (!using_pio && ata_check_atapi_dma(qc))
+	if (!nodata && !using_pio && atapi_check_dma(qc))
 		using_pio = 1;
 
 	/* Some controller variants snoop this value for Packet
@@ -2401,13 +2402,11 @@ static unsigned int atapi_xlat(struct ata_queued_cmd *qc)
 	qc->tf.lbam = (nbytes & 0xFF);
 	qc->tf.lbah = (nbytes >> 8);
 
-	if (using_pio || nodata) {
-		/* no data, or PIO data xfer */
-		if (nodata)
-			qc->tf.protocol = ATAPI_PROT_NODATA;
-		else
-			qc->tf.protocol = ATAPI_PROT_PIO;
-	} else {
+	if (nodata)
+		qc->tf.protocol = ATAPI_PROT_NODATA;
+	else if (using_pio)
+		qc->tf.protocol = ATAPI_PROT_PIO;
+	else {
 		/* DMA data xfer */
 		qc->tf.protocol = ATAPI_PROT_DMA;
 		qc->tf.feature |= ATAPI_PKT_DMA;
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 3c2d2289f85e..c0908c225483 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -247,7 +247,7 @@ u8 ata_sff_check_status(struct ata_port *ap)
 *	LOCKING:
 *	Inherited from caller.
 */
-u8 ata_sff_altstatus(struct ata_port *ap)
+static u8 ata_sff_altstatus(struct ata_port *ap)
 {
 	if (ap->ops->sff_check_altstatus)
 		return ap->ops->sff_check_altstatus(ap);
@@ -256,6 +256,93 @@ u8 ata_sff_altstatus(struct ata_port *ap)
 }
 
 /**
+ *	ata_sff_irq_status - Check if the device is busy
+ *	@ap: port where the device is
+ *
+ *	Determine if the port is currently busy. Uses altstatus
+ *	if available in order to avoid clearing shared IRQ status
+ *	when finding an IRQ source. Non ctl capable devices don't
+ *	share interrupt lines fortunately for us.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+static u8 ata_sff_irq_status(struct ata_port *ap)
+{
+	u8 status;
+
+	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+		status = ata_sff_altstatus(ap);
+		/* Not us: We are busy */
+		if (status & ATA_BUSY)
+			return status;
+	}
+	/* Clear INTRQ latch */
+	status = ap->ops->sff_check_status(ap);
+	return status;
+}
+
+/**
+ *	ata_sff_sync - Flush writes
+ *	@ap: Port to wait for.
+ *
+ *	CAUTION:
+ *	If we have an mmio device with no ctl and no altstatus
+ *	method this will fail. No such devices are known to exist.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+static void ata_sff_sync(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus)
+		ap->ops->sff_check_altstatus(ap);
+	else if (ap->ioaddr.altstatus_addr)
+		ioread8(ap->ioaddr.altstatus_addr);
+}
+
+/**
+ *	ata_sff_pause - Flush writes and wait 400nS
+ *	@ap: Port to pause for.
+ *
+ *	CAUTION:
+ *	If we have an mmio device with no ctl and no altstatus
+ *	method this will fail. No such devices are known to exist.
+ *
+ *	LOCKING:
+ *	Inherited from caller.
+ */
+
+void ata_sff_pause(struct ata_port *ap)
+{
+	ata_sff_sync(ap);
+	ndelay(400);
+}
+
+/**
+ *	ata_sff_dma_pause - Pause before commencing DMA
+ *	@ap: Port to pause for.
+ *
+ *	Perform I/O fencing and ensure sufficient cycle delays occur
+ *	for the HDMA1:0 transition
+ */
+
+void ata_sff_dma_pause(struct ata_port *ap)
+{
+	if (ap->ops->sff_check_altstatus || ap->ioaddr.altstatus_addr) {
+		/* An altstatus read will cause the needed delay without
+		   messing up the IRQ status */
+		ata_sff_altstatus(ap);
+		return;
+	}
+	/* There are no DMA controllers without ctl. BUG here to ensure
+	   we never violate the HDMA1:0 transition timing and risk
+	   corruption. */
+	BUG();
+}
+
+/**
 *	ata_sff_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
@@ -742,7 +829,7 @@ static void ata_pio_sectors(struct ata_queued_cmd *qc)
 	} else
 		ata_pio_sector(qc);
 
-	ata_sff_altstatus(qc->ap); /* flush */
+	ata_sff_sync(qc->ap); /* flush */
 }
 
 /**
@@ -763,8 +850,9 @@ static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
 	WARN_ON(qc->dev->cdb_len < 12);
 
 	ap->ops->sff_data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
-	ata_sff_altstatus(ap); /* flush */
-
+	ata_sff_sync(ap);
+	/* FIXME: If the CDB is for DMA do we need to do the transition delay
+	   or is bmdma_start guaranteed to do it ? */
 	switch (qc->tf.protocol) {
 	case ATAPI_PROT_PIO:
 		ap->hsm_task_state = HSM_ST;
@@ -905,7 +993,7 @@ static void atapi_pio_bytes(struct ata_queued_cmd *qc)
 
 	if (unlikely(__atapi_pio_bytes(qc, bytes)))
 		goto err_out;
-	ata_sff_altstatus(ap); /* flush */
+	ata_sff_sync(ap); /* flush */
 
 	return;
 
@@ -1006,6 +1094,7 @@ static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
 int ata_sff_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
 		     u8 status, int in_wq)
 {
+	struct ata_eh_info *ehi = &ap->link.eh_info;
 	unsigned long flags = 0;
 	int poll_next;
 
@@ -1037,9 +1126,12 @@ fsm_start:
 			if (likely(status & (ATA_ERR | ATA_DF)))
 				/* device stops HSM for abort/error */
 				qc->err_mask |= AC_ERR_DEV;
-			else
+			else {
 				/* HSM violation. Let EH handle this */
+				ata_ehi_push_desc(ehi,
+					"ST_FIRST: !(DRQ|ERR|DF)");
 				qc->err_mask |= AC_ERR_HSM;
+			}
 
 			ap->hsm_task_state = HSM_ST_ERR;
 			goto fsm_start;
@@ -1058,9 +1150,9 @@ fsm_start:
 			 * the CDB.
 			 */
 			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
-				ata_port_printk(ap, KERN_WARNING,
-						"DRQ=1 with device error, "
-						"dev_stat 0x%X\n", status);
+				ata_ehi_push_desc(ehi, "ST_FIRST: "
+					"DRQ=1 with device error, "
+					"dev_stat 0x%X", status);
 				qc->err_mask |= AC_ERR_HSM;
 				ap->hsm_task_state = HSM_ST_ERR;
 				goto fsm_start;
@@ -1117,9 +1209,9 @@ fsm_start:
 			 * let the EH abort the command or reset the device.
 			 */
 			if (unlikely(status & (ATA_ERR | ATA_DF))) {
-				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
-						"device error, dev_stat 0x%X\n",
-						status);
+				ata_ehi_push_desc(ehi, "ST-ATAPI: "
+					"DRQ=1 with device error, "
+					"dev_stat 0x%X", status);
 				qc->err_mask |= AC_ERR_HSM;
 				ap->hsm_task_state = HSM_ST_ERR;
 				goto fsm_start;
@@ -1138,13 +1230,17 @@ fsm_start:
 				if (likely(status & (ATA_ERR | ATA_DF)))
 					/* device stops HSM for abort/error */
 					qc->err_mask |= AC_ERR_DEV;
-				else
+				else {
 					/* HSM violation. Let EH handle this.
 					 * Phantom devices also trigger this
 					 * condition. Mark hint.
 					 */
+					ata_ehi_push_desc(ehi, "ST-ATA: "
+						"DRQ=1 with device error, "
+						"dev_stat 0x%X", status);
 					qc->err_mask |= AC_ERR_HSM |
 							AC_ERR_NODEV_HINT;
+				}
 
 				ap->hsm_task_state = HSM_ST_ERR;
 				goto fsm_start;
@@ -1169,8 +1265,12 @@ fsm_start:
 				status = ata_wait_idle(ap);
 			}
 
-			if (status & (ATA_BUSY | ATA_DRQ))
+			if (status & (ATA_BUSY | ATA_DRQ)) {
+				ata_ehi_push_desc(ehi, "ST-ATA: "
+					"BUSY|DRQ persists on ERR|DF, "
+					"dev_stat 0x%X", status);
 				qc->err_mask |= AC_ERR_HSM;
+			}
 
 			/* ata_pio_sectors() might change the
 			 * state to HSM_ST_LAST. so, the state
@@ -1489,14 +1589,10 @@ inline unsigned int ata_sff_host_intr(struct ata_port *ap,
 			goto idle_irq;
 	}
 
-	/* check altstatus */
-	status = ata_sff_altstatus(ap);
-	if (status & ATA_BUSY)
-		goto idle_irq;
 
-	/* check main status, clearing INTRQ */
-	status = ap->ops->sff_check_status(ap);
-	if (unlikely(status & ATA_BUSY))
+	/* check main status, clearing INTRQ if needed */
+	status = ata_sff_irq_status(ap);
+	if (status & ATA_BUSY)
 		goto idle_irq;
 
 	/* ack bmdma irq events */
@@ -2030,7 +2126,7 @@ void ata_sff_error_handler(struct ata_port *ap)
 			ap->ops->bmdma_stop(qc);
 		}
 
-		ata_sff_altstatus(ap);
+		ata_sff_sync(ap); /* FIXME: We don't need this */
 		ap->ops->sff_check_status(ap);
 		ap->ops->sff_irq_clear(ap);
 
@@ -2203,7 +2299,7 @@ void ata_bmdma_stop(struct ata_queued_cmd *qc)
 		 mmio + ATA_DMA_CMD);
 
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_altstatus(ap); /* dummy read */
+	ata_sff_dma_pause(ap);
 }
 
 /**
@@ -2722,7 +2818,8 @@ EXPORT_SYMBOL_GPL(ata_sff_qc_prep);
 EXPORT_SYMBOL_GPL(ata_sff_dumb_qc_prep);
 EXPORT_SYMBOL_GPL(ata_sff_dev_select);
 EXPORT_SYMBOL_GPL(ata_sff_check_status);
-EXPORT_SYMBOL_GPL(ata_sff_altstatus);
+EXPORT_SYMBOL_GPL(ata_sff_dma_pause);
+EXPORT_SYMBOL_GPL(ata_sff_pause);
 EXPORT_SYMBOL_GPL(ata_sff_busy_sleep);
 EXPORT_SYMBOL_GPL(ata_sff_wait_ready);
 EXPORT_SYMBOL_GPL(ata_sff_tf_load);
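With ata_sff_altstatus() now static, drivers that used it either as a posted-write flush or as the pre-DMA delay switch to the new helpers: ata_sff_sync()/ata_sff_pause() for flushing and ata_sff_dma_pause() before the HDMA1:0 transition, as the pata_icside, pata_rb532_cf and pata_scc hunks below do. A minimal sketch of a driver-side bmdma_stop using the exported helper (the foo_* name is hypothetical; the body mirrors ata_bmdma_stop above):

static void foo_bmdma_stop(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	void __iomem *mmio = ap->ioaddr.bmdma_addr;

	/* clear start/stop bit */
	iowrite8(ioread8(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
		 mmio + ATA_DMA_CMD);

	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
	ata_sff_dma_pause(ap);
}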
diff --git a/drivers/ata/libata.h b/drivers/ata/libata.h
index 4514283937ea..1cf803adbc95 100644
--- a/drivers/ata/libata.h
+++ b/drivers/ata/libata.h
@@ -106,7 +106,7 @@ extern void ata_sg_clean(struct ata_queued_cmd *qc);
 extern void ata_qc_free(struct ata_queued_cmd *qc);
 extern void ata_qc_issue(struct ata_queued_cmd *qc);
 extern void __ata_qc_complete(struct ata_queued_cmd *qc);
-extern int ata_check_atapi_dma(struct ata_queued_cmd *qc);
+extern int atapi_check_dma(struct ata_queued_cmd *qc);
 extern void swap_buf_le16(u16 *buf, unsigned int buf_words);
 extern void ata_dev_init(struct ata_device *dev);
 extern void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp);
diff --git a/drivers/ata/pata_icside.c b/drivers/ata/pata_icside.c
index 17138436423d..cf9e9848f8b5 100644
--- a/drivers/ata/pata_icside.c
+++ b/drivers/ata/pata_icside.c
@@ -270,7 +270,7 @@ static void pata_icside_bmdma_stop(struct ata_queued_cmd *qc)
 	disable_dma(state->dma);
 
 	/* see ata_bmdma_stop */
-	ata_sff_altstatus(ap);
+	ata_sff_dma_pause(ap);
 }
 
 static u8 pata_icside_bmdma_status(struct ata_port *ap)
diff --git a/drivers/ata/pata_pcmcia.c b/drivers/ata/pata_pcmcia.c
index 3d39f9dfec5a..41b4361bbf6e 100644
--- a/drivers/ata/pata_pcmcia.c
+++ b/drivers/ata/pata_pcmcia.c
@@ -414,6 +414,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 	PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDE", 0x547e66dc, 0x5c5ab149),
 	PCMCIA_DEVICE_PROD_ID12("IO DATA", "PCIDEII", 0x547e66dc, 0xb3662674),
 	PCMCIA_DEVICE_PROD_ID12("LOOKMEET", "CBIDE2 ", 0xe37be2b5, 0x8671043b),
+	PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF300", 0x7ed2ad87, 0x7e9e78ee),
 	PCMCIA_DEVICE_PROD_ID12("M-Systems", "CF500", 0x7ed2ad87, 0x7a13045c),
 	PCMCIA_DEVICE_PROD_ID2("NinjaATA-", 0xebe0bd79),
 	PCMCIA_DEVICE_PROD_ID12("PCMCIA", "CD-ROM", 0x281f1c5d, 0x66536591),
@@ -424,6 +425,7 @@ static struct pcmcia_device_id pcmcia_devices[] = {
 	PCMCIA_DEVICE_PROD_ID12("SMI VENDOR", "SMI PRODUCT", 0x30896c92, 0x703cc5f6),
 	PCMCIA_DEVICE_PROD_ID12("TOSHIBA", "MK2001MPL", 0xb4585a1a, 0x3489e003),
 	PCMCIA_DEVICE_PROD_ID1("TRANSCEND 512M ", 0xd0909443),
+	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF45", 0x709b1bf1, 0xf68b6f32),
 	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS1GCF80", 0x709b1bf1, 0x2a54d4b1),
 	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS2GCF120", 0x709b1bf1, 0x969aa4f2),
 	PCMCIA_DEVICE_PROD_ID12("TRANSCEND", "TS4GCF120", 0x709b1bf1, 0xf54a91c8),
diff --git a/drivers/ata/pata_rb532_cf.c b/drivers/ata/pata_rb532_cf.c
index a108d259f19d..f8b3ffc8ae9e 100644
--- a/drivers/ata/pata_rb532_cf.c
+++ b/drivers/ata/pata_rb532_cf.c
@@ -57,7 +57,9 @@ static inline void rb532_pata_finish_io(struct ata_port *ap)
 	struct ata_host *ah = ap->host;
 	struct rb532_cf_info *info = ah->private_data;
 
-	ata_sff_altstatus(ap);
+	/* FIXME: Keep previous delay. If this is merely a fence then
+	   ata_sff_sync might be sufficient. */
+	ata_sff_dma_pause(ap);
 	ndelay(RB500_CF_IO_DELAY);
 
 	set_irq_type(info->irq, IRQ_TYPE_LEVEL_HIGH);
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
index e965b251ca24..bbf5aa345e68 100644
--- a/drivers/ata/pata_scc.c
+++ b/drivers/ata/pata_scc.c
@@ -726,7 +726,7 @@ static void scc_bmdma_stop (struct ata_queued_cmd *qc)
 		      in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
 
 	/* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-	ata_sff_altstatus(ap);	/* dummy read */
+	ata_sff_dma_pause(ap);	/* dummy read */
 }
 
 /**
@@ -747,7 +747,8 @@ static u8 scc_bmdma_status (struct ata_port *ap)
 		return host_stat;
 
 	/* errata A252,A308 workaround: Step4 */
-	if ((ata_sff_altstatus(ap) & ATA_ERR) && (int_status & INTSTS_INTRQ))
+	if ((scc_check_altstatus(ap) & ATA_ERR)
+					&& (int_status & INTSTS_INTRQ))
 		return (host_stat | ATA_DMA_INTR);
 
 	/* errata A308 workaround Step5 */
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c index 853559e32315..3924e7209a44 100644 --- a/drivers/ata/sata_fsl.c +++ b/drivers/ata/sata_fsl.c | |||
@@ -34,7 +34,7 @@ enum { | |||
34 | 34 | ||
35 | SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 35 | SATA_FSL_HOST_FLAGS = (ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
36 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | | 36 | ATA_FLAG_MMIO | ATA_FLAG_PIO_DMA | |
37 | ATA_FLAG_NCQ), | 37 | ATA_FLAG_PMP | ATA_FLAG_NCQ), |
38 | 38 | ||
39 | SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH, | 39 | SATA_FSL_MAX_CMDS = SATA_FSL_QUEUE_DEPTH, |
40 | SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */ | 40 | SATA_FSL_CMD_HDR_SIZE = 16, /* 4 DWORDS */ |
@@ -395,7 +395,7 @@ static void sata_fsl_qc_prep(struct ata_queued_cmd *qc) | |||
395 | cd = (struct command_desc *)pp->cmdentry + tag; | 395 | cd = (struct command_desc *)pp->cmdentry + tag; |
396 | cd_paddr = pp->cmdentry_paddr + tag * SATA_FSL_CMD_DESC_SIZE; | 396 | cd_paddr = pp->cmdentry_paddr + tag * SATA_FSL_CMD_DESC_SIZE; |
397 | 397 | ||
398 | ata_tf_to_fis(&qc->tf, 0, 1, (u8 *) &cd->cfis); | 398 | ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *) &cd->cfis); |
399 | 399 | ||
400 | VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n", | 400 | VPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x\n", |
401 | cd->cfis[0], cd->cfis[1], cd->cfis[2]); | 401 | cd->cfis[0], cd->cfis[1], cd->cfis[2]); |
@@ -438,6 +438,8 @@ static unsigned int sata_fsl_qc_issue(struct ata_queued_cmd *qc) | |||
438 | ioread32(CA + hcr_base), | 438 | ioread32(CA + hcr_base), |
439 | ioread32(CE + hcr_base), ioread32(CC + hcr_base)); | 439 | ioread32(CE + hcr_base), ioread32(CC + hcr_base)); |
440 | 440 | ||
441 | iowrite32(qc->dev->link->pmp, CQPMP + hcr_base); | ||
442 | |||
441 | /* Simply queue command to the controller/device */ | 443 | /* Simply queue command to the controller/device */ |
442 | iowrite32(1 << tag, CQ + hcr_base); | 444 | iowrite32(1 << tag, CQ + hcr_base); |
443 | 445 | ||
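Editor's note: the two sata_fsl hunks above route a queued command to the correct device behind a port multiplier by placing the PMP port number both in the command FIS (the new ata_tf_to_fis() argument) and in the controller's CQPMP register before the tag bit is set in CQ. A compilable sketch of that ordering with stubbed register writes; CQPMP and CQ are register names from the hunks, but the offsets here are placeholders:

	#include <stdio.h>

	/* Placeholder offsets; the real values live elsewhere in the driver. */
	enum { CQPMP = 0x60, CQ = 0x64 };

	static void reg_write(unsigned int val, unsigned int off)
	{
		printf("write 0x%08x -> hcr_base + 0x%02x\n", val, off);
	}

	/* Queue one command tag, targeting the device on PMP port 'pmp'. */
	static void issue_tag(unsigned int tag, unsigned int pmp)
	{
		reg_write(pmp, CQPMP);    /* 1. tell the controller the PMP port */
		reg_write(1u << tag, CQ); /* 2. then set the tag bit to start it */
	}

	int main(void)
	{
		issue_tag(3, 1);          /* tag 3 to the device on PMP port 1 */
		return 0;
	}

The CQPMP write has to precede the CQ write, since setting the tag bit is what actually launches the command.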
@@ -558,11 +560,36 @@ static void sata_fsl_thaw(struct ata_port *ap) | |||
558 | ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS)); | 560 | ioread32(hcr_base + HCONTROL), ioread32(hcr_base + HSTATUS)); |
559 | } | 561 | } |
560 | 562 | ||
563 | static void sata_fsl_pmp_attach(struct ata_port *ap) | ||
564 | { | ||
565 | struct sata_fsl_host_priv *host_priv = ap->host->private_data; | ||
566 | void __iomem *hcr_base = host_priv->hcr_base; | ||
567 | u32 temp; | ||
568 | |||
569 | temp = ioread32(hcr_base + HCONTROL); | ||
570 | iowrite32((temp | HCONTROL_PMP_ATTACHED), hcr_base + HCONTROL); | ||
571 | } | ||
572 | |||
573 | static void sata_fsl_pmp_detach(struct ata_port *ap) | ||
574 | { | ||
575 | struct sata_fsl_host_priv *host_priv = ap->host->private_data; | ||
576 | void __iomem *hcr_base = host_priv->hcr_base; | ||
577 | u32 temp; | ||
578 | |||
579 | temp = ioread32(hcr_base + HCONTROL); | ||
580 | temp &= ~HCONTROL_PMP_ATTACHED; | ||
581 | iowrite32(temp, hcr_base + HCONTROL); | ||
582 | |||
583 | /* enable interrupts on the controller/port */ | ||
584 | temp = ioread32(hcr_base + HCONTROL); | ||
585 | iowrite32((temp | DEFAULT_PORT_IRQ_ENABLE_MASK), hcr_base + HCONTROL); | ||
586 | |||
587 | } | ||
588 | |||
561 | static int sata_fsl_port_start(struct ata_port *ap) | 589 | static int sata_fsl_port_start(struct ata_port *ap) |
562 | { | 590 | { |
563 | struct device *dev = ap->host->dev; | 591 | struct device *dev = ap->host->dev; |
564 | struct sata_fsl_port_priv *pp; | 592 | struct sata_fsl_port_priv *pp; |
565 | int retval; | ||
566 | void *mem; | 593 | void *mem; |
567 | dma_addr_t mem_dma; | 594 | dma_addr_t mem_dma; |
568 | struct sata_fsl_host_priv *host_priv = ap->host->private_data; | 595 | struct sata_fsl_host_priv *host_priv = ap->host->private_data; |
@@ -688,12 +715,13 @@ static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline) | |||
688 | } | 715 | } |
689 | 716 | ||
690 | static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, | 717 | static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, |
691 | unsigned long deadline) | 718 | unsigned long deadline) |
692 | { | 719 | { |
693 | struct ata_port *ap = link->ap; | 720 | struct ata_port *ap = link->ap; |
694 | struct sata_fsl_port_priv *pp = ap->private_data; | 721 | struct sata_fsl_port_priv *pp = ap->private_data; |
695 | struct sata_fsl_host_priv *host_priv = ap->host->private_data; | 722 | struct sata_fsl_host_priv *host_priv = ap->host->private_data; |
696 | void __iomem *hcr_base = host_priv->hcr_base; | 723 | void __iomem *hcr_base = host_priv->hcr_base; |
724 | int pmp = sata_srst_pmp(link); | ||
697 | u32 temp; | 725 | u32 temp; |
698 | struct ata_taskfile tf; | 726 | struct ata_taskfile tf; |
699 | u8 *cfis; | 727 | u8 *cfis; |
@@ -703,6 +731,9 @@ static int sata_fsl_softreset(struct ata_link *link, unsigned int *class, | |||
703 | 731 | ||
704 | DPRINTK("in xx_softreset\n"); | 732 | DPRINTK("in xx_softreset\n"); |
705 | 733 | ||
734 | if (pmp != SATA_PMP_CTRL_PORT) | ||
735 | goto issue_srst; | ||
736 | |||
706 | try_offline_again: | 737 | try_offline_again: |
707 | /* | 738 | /* |
708 | * Force host controller to go off-line, aborting current operations | 739 | * Force host controller to go off-line, aborting current operations |
@@ -746,6 +777,7 @@ try_offline_again: | |||
746 | 777 | ||
747 | temp = ioread32(hcr_base + HCONTROL); | 778 | temp = ioread32(hcr_base + HCONTROL); |
748 | temp |= (HCONTROL_ONLINE_PHY_RST | HCONTROL_SNOOP_ENABLE); | 779 | temp |= (HCONTROL_ONLINE_PHY_RST | HCONTROL_SNOOP_ENABLE); |
780 | temp |= HCONTROL_PMP_ATTACHED; | ||
749 | iowrite32(temp, hcr_base + HCONTROL); | 781 | iowrite32(temp, hcr_base + HCONTROL); |
750 | 782 | ||
751 | temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, 0, 1, 500); | 783 | temp = ata_wait_register(hcr_base + HSTATUS, ONLINE, 0, 1, 500); |
@@ -771,7 +803,8 @@ try_offline_again: | |||
771 | ata_port_printk(ap, KERN_WARNING, | 803 | ata_port_printk(ap, KERN_WARNING, |
772 | "No Device OR PHYRDY change,Hstatus = 0x%x\n", | 804 | "No Device OR PHYRDY change,Hstatus = 0x%x\n", |
773 | ioread32(hcr_base + HSTATUS)); | 805 | ioread32(hcr_base + HSTATUS)); |
774 | goto err; | 806 | *class = ATA_DEV_NONE; |
807 | goto out; | ||
775 | } | 808 | } |
776 | 809 | ||
777 | /* | 810 | /* |
@@ -783,7 +816,8 @@ try_offline_again: | |||
783 | 816 | ||
784 | if ((temp & 0xFF) != 0x18) { | 817 | if ((temp & 0xFF) != 0x18) { |
785 | ata_port_printk(ap, KERN_WARNING, "No Signature Update\n"); | 818 | ata_port_printk(ap, KERN_WARNING, "No Signature Update\n"); |
786 | goto err; | 819 | *class = ATA_DEV_NONE; |
820 | goto out; | ||
787 | } else { | 821 | } else { |
788 | ata_port_printk(ap, KERN_INFO, | 822 | ata_port_printk(ap, KERN_INFO, |
789 | "Signature Update detected @ %d msecs\n", | 823 | "Signature Update detected @ %d msecs\n", |
@@ -798,6 +832,7 @@ try_offline_again: | |||
798 | * reached here, we can send a command to the target device | 832 | * reached here, we can send a command to the target device |
799 | */ | 833 | */ |
800 | 834 | ||
835 | issue_srst: | ||
801 | DPRINTK("Sending SRST/device reset\n"); | 836 | DPRINTK("Sending SRST/device reset\n"); |
802 | 837 | ||
803 | ata_tf_init(link->device, &tf); | 838 | ata_tf_init(link->device, &tf); |
@@ -808,7 +843,7 @@ try_offline_again: | |||
808 | SRST_CMD | CMD_DESC_SNOOP_ENABLE, 0, 0, 5); | 843 | SRST_CMD | CMD_DESC_SNOOP_ENABLE, 0, 0, 5); |
809 | 844 | ||
810 | tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */ | 845 | tf.ctl |= ATA_SRST; /* setup SRST bit in taskfile control reg */ |
811 | ata_tf_to_fis(&tf, 0, 0, cfis); | 846 | ata_tf_to_fis(&tf, pmp, 0, cfis); |
812 | 847 | ||
813 | DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n", | 848 | DPRINTK("Dumping cfis : 0x%x, 0x%x, 0x%x, 0x%x\n", |
814 | cfis[0], cfis[1], cfis[2], cfis[3]); | 849 | cfis[0], cfis[1], cfis[2], cfis[3]); |
@@ -854,8 +889,10 @@ try_offline_again: | |||
854 | sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_SNOOP_ENABLE, 0, 0, 5); | 889 | sata_fsl_setup_cmd_hdr_entry(pp, 0, CMD_DESC_SNOOP_ENABLE, 0, 0, 5); |
855 | 890 | ||
856 | tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. register FIS */ | 891 | tf.ctl &= ~ATA_SRST; /* 2nd H2D Ctl. register FIS */ |
857 | ata_tf_to_fis(&tf, 0, 0, cfis); | 892 | ata_tf_to_fis(&tf, pmp, 0, cfis); |
858 | 893 | ||
894 | if (pmp != SATA_PMP_CTRL_PORT) | ||
895 | iowrite32(pmp, CQPMP + hcr_base); | ||
859 | iowrite32(1, CQ + hcr_base); | 896 | iowrite32(1, CQ + hcr_base); |
860 | msleep(150); /* ?? */ | 897 | msleep(150); /* ?? */ |
861 | 898 | ||
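Editor's note: the softreset hunks above branch on sata_srst_pmp(link). For a device link behind the multiplier the controller is already online, so the offline/online and signature-wait sequence is skipped and the code jumps straight to issue_srst, where both H2D control FISes carry the PMP port number and CQPMP is programmed before queueing. A small sketch of that control flow, assuming libata's SATA_PMP_CTRL_PORT value of 15:

	#include <stdio.h>

	#define SATA_PMP_CTRL_PORT 15  /* assumed value of libata's control port */

	/* Sketch of the branch structure only; the real work is elided. */
	static int softreset(int pmp)
	{
		if (pmp != SATA_PMP_CTRL_PORT)
			goto issue_srst;   /* device link: controller already up */

		/* control port: offline/online the controller, wait for signature */
		printf("offline/online host controller\n");

	issue_srst:
		printf("send SRST FIS to pmp port %d\n", pmp);
		return 0;
	}

	int main(void)
	{
		softreset(SATA_PMP_CTRL_PORT); /* host link reset */
		softreset(2);                  /* device behind PMP port 2 */
		return 0;
	}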
@@ -886,12 +923,21 @@ try_offline_again: | |||
886 | VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE)); | 923 | VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE)); |
887 | } | 924 | } |
888 | 925 | ||
926 | out: | ||
889 | return 0; | 927 | return 0; |
890 | 928 | ||
891 | err: | 929 | err: |
892 | return -EIO; | 930 | return -EIO; |
893 | } | 931 | } |
894 | 932 | ||
933 | static void sata_fsl_error_handler(struct ata_port *ap) | ||
934 | { | ||
935 | |||
936 | DPRINTK("in xx_error_handler\n"); | ||
937 | sata_pmp_error_handler(ap); | ||
938 | |||
939 | } | ||
940 | |||
895 | static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) | 941 | static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) |
896 | { | 942 | { |
897 | if (qc->flags & ATA_QCFLAG_FAILED) | 943 | if (qc->flags & ATA_QCFLAG_FAILED) |
@@ -905,18 +951,21 @@ static void sata_fsl_post_internal_cmd(struct ata_queued_cmd *qc) | |||
905 | 951 | ||
906 | static void sata_fsl_error_intr(struct ata_port *ap) | 952 | static void sata_fsl_error_intr(struct ata_port *ap) |
907 | { | 953 | { |
908 | struct ata_link *link = &ap->link; | ||
909 | struct ata_eh_info *ehi = &link->eh_info; | ||
910 | struct sata_fsl_host_priv *host_priv = ap->host->private_data; | 954 | struct sata_fsl_host_priv *host_priv = ap->host->private_data; |
911 | void __iomem *hcr_base = host_priv->hcr_base; | 955 | void __iomem *hcr_base = host_priv->hcr_base; |
912 | u32 hstatus, dereg, cereg = 0, SError = 0; | 956 | u32 hstatus, dereg=0, cereg = 0, SError = 0; |
913 | unsigned int err_mask = 0, action = 0; | 957 | unsigned int err_mask = 0, action = 0; |
914 | struct ata_queued_cmd *qc; | 958 | int freeze = 0, abort=0; |
915 | int freeze = 0; | 959 | struct ata_link *link = NULL; |
960 | struct ata_queued_cmd *qc = NULL; | ||
961 | struct ata_eh_info *ehi; | ||
916 | 962 | ||
917 | hstatus = ioread32(hcr_base + HSTATUS); | 963 | hstatus = ioread32(hcr_base + HSTATUS); |
918 | cereg = ioread32(hcr_base + CE); | 964 | cereg = ioread32(hcr_base + CE); |
919 | 965 | ||
966 | /* first, analyze and record host port events */ | ||
967 | link = &ap->link; | ||
968 | ehi = &link->eh_info; | ||
920 | ata_ehi_clear_desc(ehi); | 969 | ata_ehi_clear_desc(ehi); |
921 | 970 | ||
922 | /* | 971 | /* |
@@ -926,42 +975,28 @@ static void sata_fsl_error_intr(struct ata_port *ap) | |||
926 | sata_fsl_scr_read(ap, SCR_ERROR, &SError); | 975 | sata_fsl_scr_read(ap, SCR_ERROR, &SError); |
927 | if (unlikely(SError & 0xFFFF0000)) { | 976 | if (unlikely(SError & 0xFFFF0000)) { |
928 | sata_fsl_scr_write(ap, SCR_ERROR, SError); | 977 | sata_fsl_scr_write(ap, SCR_ERROR, SError); |
929 | err_mask |= AC_ERR_ATA_BUS; | ||
930 | } | 978 | } |
931 | 979 | ||
932 | DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n", | 980 | DPRINTK("error_intr,hStat=0x%x,CE=0x%x,DE =0x%x,SErr=0x%x\n", |
933 | hstatus, cereg, ioread32(hcr_base + DE), SError); | 981 | hstatus, cereg, ioread32(hcr_base + DE), SError); |
934 | 982 | ||
935 | /* handle single device errors */ | 983 | /* handle fatal errors */ |
936 | if (cereg) { | 984 | if (hstatus & FATAL_ERROR_DECODE) { |
937 | /* | 985 | ehi->err_mask |= AC_ERR_ATA_BUS; |
938 | * clear the command error, also clears queue to the device | 986 | ehi->action |= ATA_EH_SOFTRESET; |
939 | * in error, and we can (re)issue commands to this device. | ||
940 | * When a device is in error all commands queued into the | ||
941 | * host controller and at the device are considered aborted | ||
942 | * and the queue for that device is stopped. Now, after | ||
943 | * clearing the device error, we can issue commands to the | ||
944 | * device to interrogate it to find the source of the error. | ||
945 | */ | ||
946 | dereg = ioread32(hcr_base + DE); | ||
947 | iowrite32(dereg, hcr_base + DE); | ||
948 | iowrite32(cereg, hcr_base + CE); | ||
949 | 987 | ||
950 | DPRINTK("single device error, CE=0x%x, DE=0x%x\n", | ||
951 | ioread32(hcr_base + CE), ioread32(hcr_base + DE)); | ||
952 | /* | 988 | /* |
953 | * We should consider this as non fatal error, and TF must | 989 | * Ignore serror in case of fatal errors as we always want |
954 | * be updated as done below. | 990 | * to do a soft-reset of the FSL SATA controller. Analyzing |
991 | * serror may cause libata to schedule a hard-reset action, | ||
992 | * and hard-reset currently does not do controller | ||
993 | * offline/online, causing command timeouts and leads to an | ||
994 | * un-recoverable state, hence make libATA ignore | ||
995 | * autopsy in case of fatal errors. | ||
955 | */ | 996 | */ |
956 | 997 | ||
957 | err_mask |= AC_ERR_DEV; | 998 | ehi->flags |= ATA_EHI_NO_AUTOPSY; |
958 | } | ||
959 | 999 | ||
960 | /* handle fatal errors */ | ||
961 | if (hstatus & FATAL_ERROR_DECODE) { | ||
962 | err_mask |= AC_ERR_ATA_BUS; | ||
963 | action |= ATA_EH_RESET; | ||
964 | /* how will fatal error interrupts be completed ?? */ | ||
965 | freeze = 1; | 1000 | freeze = 1; |
966 | } | 1001 | } |
967 | 1002 | ||
@@ -971,30 +1006,83 @@ static void sata_fsl_error_intr(struct ata_port *ap) | |||
971 | 1006 | ||
972 | /* Setup a soft-reset EH action */ | 1007 | /* Setup a soft-reset EH action */ |
973 | ata_ehi_hotplugged(ehi); | 1008 | ata_ehi_hotplugged(ehi); |
1009 | ata_ehi_push_desc(ehi, "%s", "PHY RDY changed"); | ||
974 | freeze = 1; | 1010 | freeze = 1; |
975 | } | 1011 | } |
976 | 1012 | ||
977 | /* record error info */ | 1013 | /* handle single device errors */ |
978 | qc = ata_qc_from_tag(ap, link->active_tag); | 1014 | if (cereg) { |
1015 | /* | ||
1016 | * clear the command error, also clears queue to the device | ||
1017 | * in error, and we can (re)issue commands to this device. | ||
1018 | * When a device is in error all commands queued into the | ||
1019 | * host controller and at the device are considered aborted | ||
1020 | * and the queue for that device is stopped. Now, after | ||
1021 | * clearing the device error, we can issue commands to the | ||
1022 | * device to interrogate it to find the source of the error. | ||
1023 | */ | ||
1024 | abort = 1; | ||
1025 | |||
1026 | DPRINTK("single device error, CE=0x%x, DE=0x%x\n", | ||
1027 | ioread32(hcr_base + CE), ioread32(hcr_base + DE)); | ||
979 | 1028 | ||
980 | if (qc) | 1029 | /* find out the offending link and qc */ |
1030 | if (ap->nr_pmp_links) { | ||
1031 | dereg = ioread32(hcr_base + DE); | ||
1032 | iowrite32(dereg, hcr_base + DE); | ||
1033 | iowrite32(cereg, hcr_base + CE); | ||
1034 | |||
1035 | if (dereg < ap->nr_pmp_links) { | ||
1036 | link = &ap->pmp_link[dereg]; | ||
1037 | ehi = &link->eh_info; | ||
1038 | qc = ata_qc_from_tag(ap, link->active_tag); | ||
1039 | /* | ||
1040 | * We should consider this as non fatal error, | ||
1041 | * and TF must be updated as done below. | ||
1042 | */ | ||
1043 | |||
1044 | err_mask |= AC_ERR_DEV; | ||
1045 | |||
1046 | } else { | ||
1047 | err_mask |= AC_ERR_HSM; | ||
1048 | action |= ATA_EH_HARDRESET; | ||
1049 | freeze = 1; | ||
1050 | } | ||
1051 | } else { | ||
1052 | dereg = ioread32(hcr_base + DE); | ||
1053 | iowrite32(dereg, hcr_base + DE); | ||
1054 | iowrite32(cereg, hcr_base + CE); | ||
1055 | |||
1056 | qc = ata_qc_from_tag(ap, link->active_tag); | ||
1057 | /* | ||
1058 | * We should consider this as non fatal error, | ||
1059 | * and TF must be updated as done below. | ||
1060 | */ | ||
1061 | err_mask |= AC_ERR_DEV; | ||
1062 | } | ||
1063 | } | ||
1064 | |||
1065 | /* record error info */ | ||
1066 | if (qc) { | ||
981 | qc->err_mask |= err_mask; | 1067 | qc->err_mask |= err_mask; |
982 | else | 1068 | } else |
983 | ehi->err_mask |= err_mask; | 1069 | ehi->err_mask |= err_mask; |
984 | 1070 | ||
985 | ehi->action |= action; | 1071 | ehi->action |= action; |
986 | ehi->serror |= SError; | ||
987 | 1072 | ||
988 | /* freeze or abort */ | 1073 | /* freeze or abort */ |
989 | if (freeze) | 1074 | if (freeze) |
990 | ata_port_freeze(ap); | 1075 | ata_port_freeze(ap); |
991 | else | 1076 | else if (abort) { |
992 | ata_port_abort(ap); | 1077 | if (qc) |
1078 | ata_link_abort(qc->dev->link); | ||
1079 | else | ||
1080 | ata_port_abort(ap); | ||
1081 | } | ||
993 | } | 1082 | } |
994 | 1083 | ||
995 | static void sata_fsl_host_intr(struct ata_port *ap) | 1084 | static void sata_fsl_host_intr(struct ata_port *ap) |
996 | { | 1085 | { |
997 | struct ata_link *link = &ap->link; | ||
998 | struct sata_fsl_host_priv *host_priv = ap->host->private_data; | 1086 | struct sata_fsl_host_priv *host_priv = ap->host->private_data; |
999 | void __iomem *hcr_base = host_priv->hcr_base; | 1087 | void __iomem *hcr_base = host_priv->hcr_base; |
1000 | u32 hstatus, qc_active = 0; | 1088 | u32 hstatus, qc_active = 0; |
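Editor's note: the reworked error path above attributes a single-device error to the offending link when a port multiplier is attached: the value read back from DE identifies the PMP port, and an out-of-range value is escalated to an HSM error with a hard reset and freeze; without a PMP the host link is charged with a non-fatal device error. A compilable model of that classification (the AC_ERR_* values here are placeholders, and the register reads are replaced by a plain parameter):

	#include <stdio.h>

	enum { AC_ERR_DEV = 1, AC_ERR_HSM = 2 };  /* placeholder error bits */

	struct result {
		unsigned int err_mask;
		int hardreset;
		int freeze;
		int link;       /* PMP link index charged with the error, or -1 */
	};

	/* dereg: value read from DE; nr_pmp_links: 0 when no PMP is attached. */
	static struct result classify_device_error(unsigned int dereg,
						   unsigned int nr_pmp_links)
	{
		struct result r = { .link = -1 };

		if (nr_pmp_links && dereg < nr_pmp_links) {
			r.link = (int)dereg;      /* charge that PMP link */
			r.err_mask = AC_ERR_DEV;  /* non-fatal device error */
		} else if (nr_pmp_links) {
			r.err_mask = AC_ERR_HSM;  /* bogus DE value: escalate */
			r.hardreset = 1;
			r.freeze = 1;
		} else {
			r.err_mask = AC_ERR_DEV;  /* no PMP: host link, non-fatal */
		}
		return r;
	}

	int main(void)
	{
		struct result r = classify_device_error(2, 5);
		printf("link=%d err=%u freeze=%d\n", r.link, r.err_mask, r.freeze);
		return 0;
	}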
@@ -1017,10 +1105,19 @@ static void sata_fsl_host_intr(struct ata_port *ap) | |||
1017 | return; | 1105 | return; |
1018 | } | 1106 | } |
1019 | 1107 | ||
1020 | if (link->sactive) { /* only true for NCQ commands */ | 1108 | /* Read command completed register */ |
1109 | qc_active = ioread32(hcr_base + CC); | ||
1110 | |||
1111 | VPRINTK("Status of all queues :\n"); | ||
1112 | VPRINTK("qc_active/CC = 0x%x, CA = 0x%x, CE=0x%x,CQ=0x%x,apqa=0x%x\n", | ||
1113 | qc_active, | ||
1114 | ioread32(hcr_base + CA), | ||
1115 | ioread32(hcr_base + CE), | ||
1116 | ioread32(hcr_base + CQ), | ||
1117 | ap->qc_active); | ||
1118 | |||
1119 | if (qc_active & ap->qc_active) { | ||
1021 | int i; | 1120 | int i; |
1022 | /* Read command completed register */ | ||
1023 | qc_active = ioread32(hcr_base + CC); | ||
1024 | /* clear CC bit, this will also complete the interrupt */ | 1121 | /* clear CC bit, this will also complete the interrupt */ |
1025 | iowrite32(qc_active, hcr_base + CC); | 1122 | iowrite32(qc_active, hcr_base + CC); |
1026 | 1123 | ||
@@ -1032,8 +1129,9 @@ static void sata_fsl_host_intr(struct ata_port *ap) | |||
1032 | for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { | 1129 | for (i = 0; i < SATA_FSL_QUEUE_DEPTH; i++) { |
1033 | if (qc_active & (1 << i)) { | 1130 | if (qc_active & (1 << i)) { |
1034 | qc = ata_qc_from_tag(ap, i); | 1131 | qc = ata_qc_from_tag(ap, i); |
1035 | if (qc) | 1132 | if (qc) { |
1036 | ata_qc_complete(qc); | 1133 | ata_qc_complete(qc); |
1134 | } | ||
1037 | DPRINTK | 1135 | DPRINTK |
1038 | ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", | 1136 | ("completing ncq cmd,tag=%d,CC=0x%x,CA=0x%x\n", |
1039 | i, ioread32(hcr_base + CC), | 1137 | i, ioread32(hcr_base + CC), |
@@ -1042,19 +1140,21 @@ static void sata_fsl_host_intr(struct ata_port *ap) | |||
1042 | } | 1140 | } |
1043 | return; | 1141 | return; |
1044 | 1142 | ||
1045 | } else if (ap->qc_active) { | 1143 | } else if ((ap->qc_active & (1 << ATA_TAG_INTERNAL))) { |
1046 | iowrite32(1, hcr_base + CC); | 1144 | iowrite32(1, hcr_base + CC); |
1047 | qc = ata_qc_from_tag(ap, link->active_tag); | 1145 | qc = ata_qc_from_tag(ap, ATA_TAG_INTERNAL); |
1048 | 1146 | ||
1049 | DPRINTK("completing non-ncq cmd, tag=%d,CC=0x%x\n", | 1147 | DPRINTK("completing non-ncq cmd, CC=0x%x\n", |
1050 | link->active_tag, ioread32(hcr_base + CC)); | 1148 | ioread32(hcr_base + CC)); |
1051 | 1149 | ||
1052 | if (qc) | 1150 | if (qc) { |
1053 | ata_qc_complete(qc); | 1151 | ata_qc_complete(qc); |
1152 | } | ||
1054 | } else { | 1153 | } else { |
1055 | /* Spurious Interrupt!! */ | 1154 | /* Spurious Interrupt!! */ |
1056 | DPRINTK("spurious interrupt!!, CC = 0x%x\n", | 1155 | DPRINTK("spurious interrupt!!, CC = 0x%x\n", |
1057 | ioread32(hcr_base + CC)); | 1156 | ioread32(hcr_base + CC)); |
1157 | iowrite32(qc_active, hcr_base + CC); | ||
1058 | return; | 1158 | return; |
1059 | } | 1159 | } |
1060 | } | 1160 | } |
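Editor's note: after the change above, the interrupt handler reads the command-completed (CC) register unconditionally, completes every tag that is both reported done and still outstanding, falls back to the internal tag for non-NCQ internal commands, and acknowledges anything else as spurious. A standalone sketch of that tag walk; QUEUE_DEPTH and the internal-tag number are placeholders:

	#include <stdio.h>

	#define QUEUE_DEPTH   16
	#define TAG_INTERNAL  31   /* placeholder for libata's internal tag */

	/* cc: completed-tag bitmap; qc_active: tags the port has outstanding. */
	static void complete_tags(unsigned int cc, unsigned int qc_active)
	{
		if (cc & qc_active) {
			for (int i = 0; i < QUEUE_DEPTH; i++)
				if (cc & (1u << i))
					printf("complete tag %d\n", i);
		} else if (qc_active & (1u << TAG_INTERNAL)) {
			printf("complete internal (non-NCQ) command\n");
		} else {
			printf("spurious interrupt, ack CC=0x%x\n", cc);
		}
	}

	int main(void)
	{
		complete_tags(0x05, 0x0f);               /* NCQ tags 0 and 2 done */
		complete_tags(0x00, 1u << TAG_INTERNAL); /* internal command done */
		return 0;
	}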
@@ -1130,9 +1230,6 @@ static int sata_fsl_init_controller(struct ata_host *host) | |||
1130 | iowrite32(0x00000FFFF, hcr_base + CE); | 1230 | iowrite32(0x00000FFFF, hcr_base + CE); |
1131 | iowrite32(0x00000FFFF, hcr_base + DE); | 1231 | iowrite32(0x00000FFFF, hcr_base + DE); |
1132 | 1232 | ||
1133 | /* initially assuming no Port multiplier, set CQPMP to 0 */ | ||
1134 | iowrite32(0x0, hcr_base + CQPMP); | ||
1135 | |||
1136 | /* | 1233 | /* |
1137 | * host controller will be brought on-line, during xx_port_start() | 1234 | * host controller will be brought on-line, during xx_port_start() |
1138 | * callback, that should also initiate the OOB, COMINIT sequence | 1235 | * callback, that should also initiate the OOB, COMINIT sequence |
@@ -1154,8 +1251,8 @@ static struct scsi_host_template sata_fsl_sht = { | |||
1154 | .dma_boundary = ATA_DMA_BOUNDARY, | 1251 | .dma_boundary = ATA_DMA_BOUNDARY, |
1155 | }; | 1252 | }; |
1156 | 1253 | ||
1157 | static const struct ata_port_operations sata_fsl_ops = { | 1254 | static struct ata_port_operations sata_fsl_ops = { |
1158 | .inherits = &sata_port_ops, | 1255 | .inherits = &sata_pmp_port_ops, |
1159 | 1256 | ||
1160 | .qc_prep = sata_fsl_qc_prep, | 1257 | .qc_prep = sata_fsl_qc_prep, |
1161 | .qc_issue = sata_fsl_qc_issue, | 1258 | .qc_issue = sata_fsl_qc_issue, |
@@ -1168,10 +1265,15 @@ static const struct ata_port_operations sata_fsl_ops = { | |||
1168 | .thaw = sata_fsl_thaw, | 1265 | .thaw = sata_fsl_thaw, |
1169 | .prereset = sata_fsl_prereset, | 1266 | .prereset = sata_fsl_prereset, |
1170 | .softreset = sata_fsl_softreset, | 1267 | .softreset = sata_fsl_softreset, |
1268 | .pmp_softreset = sata_fsl_softreset, | ||
1269 | .error_handler = sata_fsl_error_handler, | ||
1171 | .post_internal_cmd = sata_fsl_post_internal_cmd, | 1270 | .post_internal_cmd = sata_fsl_post_internal_cmd, |
1172 | 1271 | ||
1173 | .port_start = sata_fsl_port_start, | 1272 | .port_start = sata_fsl_port_start, |
1174 | .port_stop = sata_fsl_port_stop, | 1273 | .port_stop = sata_fsl_port_stop, |
1274 | |||
1275 | .pmp_attach = sata_fsl_pmp_attach, | ||
1276 | .pmp_detach = sata_fsl_pmp_detach, | ||
1175 | }; | 1277 | }; |
1176 | 1278 | ||
1177 | static const struct ata_port_info sata_fsl_port_info[] = { | 1279 | static const struct ata_port_info sata_fsl_port_info[] = { |
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index fb81f0c7a8c2..ad169ffbc4cb 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -72,7 +72,7 @@ | |||
72 | #include <linux/libata.h> | 72 | #include <linux/libata.h> |
73 | 73 | ||
74 | #define DRV_NAME "sata_mv" | 74 | #define DRV_NAME "sata_mv" |
75 | #define DRV_VERSION "1.21" | 75 | #define DRV_VERSION "1.24" |
76 | 76 | ||
77 | enum { | 77 | enum { |
78 | /* BAR's are enumerated in terms of pci_resource_start() terms */ | 78 | /* BAR's are enumerated in terms of pci_resource_start() terms */ |
@@ -122,8 +122,6 @@ enum { | |||
122 | /* Host Flags */ | 122 | /* Host Flags */ |
123 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ | 123 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ |
124 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ | 124 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ |
125 | /* SoC integrated controllers, no PCI interface */ | ||
126 | MV_FLAG_SOC = (1 << 28), | ||
127 | 125 | ||
128 | MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 126 | MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
129 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | | 127 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | |
@@ -226,6 +224,11 @@ enum { | |||
226 | 224 | ||
227 | PHY_MODE3 = 0x310, | 225 | PHY_MODE3 = 0x310, |
228 | PHY_MODE4 = 0x314, | 226 | PHY_MODE4 = 0x314, |
227 | PHY_MODE4_CFG_MASK = 0x00000003, /* phy internal config field */ | ||
228 | PHY_MODE4_CFG_VALUE = 0x00000001, /* phy internal config field */ | ||
229 | PHY_MODE4_RSVD_ZEROS = 0x5de3fffa, /* Gen2e always write zeros */ | ||
230 | PHY_MODE4_RSVD_ONES = 0x00000005, /* Gen2e always write ones */ | ||
231 | |||
229 | PHY_MODE2 = 0x330, | 232 | PHY_MODE2 = 0x330, |
230 | SATA_IFCTL_OFS = 0x344, | 233 | SATA_IFCTL_OFS = 0x344, |
231 | SATA_TESTCTL_OFS = 0x348, | 234 | SATA_TESTCTL_OFS = 0x348, |
@@ -356,12 +359,12 @@ enum { | |||
356 | MV_HP_ERRATA_50XXB2 = (1 << 2), | 359 | MV_HP_ERRATA_50XXB2 = (1 << 2), |
357 | MV_HP_ERRATA_60X1B2 = (1 << 3), | 360 | MV_HP_ERRATA_60X1B2 = (1 << 3), |
358 | MV_HP_ERRATA_60X1C0 = (1 << 4), | 361 | MV_HP_ERRATA_60X1C0 = (1 << 4), |
359 | MV_HP_ERRATA_XX42A0 = (1 << 5), | ||
360 | MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ | 362 | MV_HP_GEN_I = (1 << 6), /* Generation I: 50xx */ |
361 | MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ | 363 | MV_HP_GEN_II = (1 << 7), /* Generation II: 60xx */ |
362 | MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ | 364 | MV_HP_GEN_IIE = (1 << 8), /* Generation IIE: 6042/7042 */ |
363 | MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ | 365 | MV_HP_PCIE = (1 << 9), /* PCIe bus/regs: 7042 */ |
364 | MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ | 366 | MV_HP_CUT_THROUGH = (1 << 10), /* can use EDMA cut-through */ |
367 | MV_HP_FLAG_SOC = (1 << 11), /* SystemOnChip, no PCI */ | ||
365 | 368 | ||
366 | /* Port private flags (pp_flags) */ | 369 | /* Port private flags (pp_flags) */ |
367 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ | 370 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ |
@@ -374,7 +377,7 @@ enum { | |||
374 | #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) | 377 | #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II) |
375 | #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) | 378 | #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE) |
376 | #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) | 379 | #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE) |
377 | #define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC)) | 380 | #define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC) |
378 | 381 | ||
379 | #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) | 382 | #define WINDOW_CTRL(i) (0x20030 + ((i) << 4)) |
380 | #define WINDOW_BASE(i) (0x20034 + ((i) << 4)) | 383 | #define WINDOW_BASE(i) (0x20034 + ((i) << 4)) |
@@ -652,7 +655,7 @@ static const struct ata_port_info mv_port_info[] = { | |||
652 | .port_ops = &mv_iie_ops, | 655 | .port_ops = &mv_iie_ops, |
653 | }, | 656 | }, |
654 | { /* chip_soc */ | 657 | { /* chip_soc */ |
655 | .flags = MV_GENIIE_FLAGS | MV_FLAG_SOC, | 658 | .flags = MV_GENIIE_FLAGS, |
656 | .pio_mask = 0x1f, /* pio0-4 */ | 659 | .pio_mask = 0x1f, /* pio0-4 */ |
657 | .udma_mask = ATA_UDMA6, | 660 | .udma_mask = ATA_UDMA6, |
658 | .port_ops = &mv_iie_ops, | 661 | .port_ops = &mv_iie_ops, |
@@ -812,12 +815,7 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio, | |||
812 | writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); | 815 | writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS); |
813 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, | 816 | writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index, |
814 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); | 817 | port_mmio + EDMA_REQ_Q_IN_PTR_OFS); |
815 | 818 | writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | |
816 | if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) | ||
817 | writelfl((pp->crqb_dma & 0xffffffff) | index, | ||
818 | port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | ||
819 | else | ||
820 | writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS); | ||
821 | 819 | ||
822 | /* | 820 | /* |
823 | * initialize response queue | 821 | * initialize response queue |
@@ -827,13 +825,7 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio, | |||
827 | 825 | ||
828 | WARN_ON(pp->crpb_dma & 0xff); | 826 | WARN_ON(pp->crpb_dma & 0xff); |
829 | writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); | 827 | writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS); |
830 | 828 | writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | |
831 | if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0) | ||
832 | writelfl((pp->crpb_dma & 0xffffffff) | index, | ||
833 | port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | ||
834 | else | ||
835 | writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS); | ||
836 | |||
837 | writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, | 829 | writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index, |
838 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); | 830 | port_mmio + EDMA_RSP_Q_OUT_PTR_OFS); |
839 | } | 831 | } |
@@ -1254,7 +1246,7 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq) | |||
1254 | 1246 | ||
1255 | cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ | 1247 | cfg |= (1 << 23); /* do not mask PM field in rx'd FIS */ |
1256 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ | 1248 | cfg |= (1 << 22); /* enab 4-entry host queue cache */ |
1257 | if (HAS_PCI(ap->host)) | 1249 | if (!IS_SOC(hpriv)) |
1258 | cfg |= (1 << 18); /* enab early completion */ | 1250 | cfg |= (1 << 18); /* enab early completion */ |
1259 | if (hpriv->hp_flags & MV_HP_CUT_THROUGH) | 1251 | if (hpriv->hp_flags & MV_HP_CUT_THROUGH) |
1260 | cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ | 1252 | cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */ |
@@ -1330,6 +1322,9 @@ static int mv_port_start(struct ata_port *ap) | |||
1330 | goto out_port_free_dma_mem; | 1322 | goto out_port_free_dma_mem; |
1331 | memset(pp->crpb, 0, MV_CRPB_Q_SZ); | 1323 | memset(pp->crpb, 0, MV_CRPB_Q_SZ); |
1332 | 1324 | ||
1325 | /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */ | ||
1326 | if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0) | ||
1327 | ap->flags |= ATA_FLAG_AN; | ||
1333 | /* | 1328 | /* |
1334 | * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. | 1329 | * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl. |
1335 | * For later hardware, we need one unique sg_tbl per NCQ tag. | 1330 | * For later hardware, we need one unique sg_tbl per NCQ tag. |
@@ -1600,6 +1595,24 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
1600 | 1595 | ||
1601 | if ((qc->tf.protocol != ATA_PROT_DMA) && | 1596 | if ((qc->tf.protocol != ATA_PROT_DMA) && |
1602 | (qc->tf.protocol != ATA_PROT_NCQ)) { | 1597 | (qc->tf.protocol != ATA_PROT_NCQ)) { |
1598 | static int limit_warnings = 10; | ||
1599 | /* | ||
1600 | * Errata SATA#16, SATA#24: warn if multiple DRQs expected. | ||
1601 | * | ||
1602 | * Someday, we might implement special polling workarounds | ||
1603 | * for these, but it all seems rather unnecessary since we | ||
1604 | * normally use only DMA for commands which transfer more | ||
1605 | * than a single block of data. | ||
1606 | * | ||
1607 | * Much of the time, this could just work regardless. | ||
1608 | * So for now, just log the incident, and allow the attempt. | ||
1609 | */ | ||
1610 | if (limit_warnings > 0 && (qc->nbytes / qc->sect_size) > 1) { | ||
1611 | --limit_warnings; | ||
1612 | ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME | ||
1613 | ": attempting PIO w/multiple DRQ: " | ||
1614 | "this may fail due to h/w errata\n"); | ||
1615 | } | ||
1603 | /* | 1616 | /* |
1604 | * We're about to send a non-EDMA capable command to the | 1617 | * We're about to send a non-EDMA capable command to the |
1605 | * port. Turn off EDMA so there won't be problems accessing | 1618 | * port. Turn off EDMA so there won't be problems accessing |
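Editor's note: the hunk above adds a rate-limited warning when a PIO command would need more than one DRQ data block, the situation covered by errata SATA#16/#24. The test is simply transfer size divided by sector size; a compilable model of the rate-limited-warning pattern used there:

	#include <stdio.h>

	/* Warn at most this many times over the driver's lifetime. */
	static int limit_warnings = 10;

	static void maybe_warn_multi_drq(unsigned int nbytes, unsigned int sect_size)
	{
		if (limit_warnings > 0 && (nbytes / sect_size) > 1) {
			--limit_warnings;
			fprintf(stderr,
				"attempting PIO w/multiple DRQ: may hit h/w errata\n");
		}
	}

	int main(void)
	{
		maybe_warn_multi_drq(4096, 512); /* 8 sectors via PIO: warns */
		maybe_warn_multi_drq(512, 512);  /* single sector: silent */
		return 0;
	}

The command is still allowed to proceed; the message only flags that a multi-DRQ PIO transfer may trip the hardware errata.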
@@ -2225,7 +2238,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) | |||
2225 | * a bogus register value which can indicate HW removal or PCI fault. | 2238 | * a bogus register value which can indicate HW removal or PCI fault. |
2226 | */ | 2239 | */ |
2227 | if (pending_irqs && main_irq_cause != 0xffffffffU) { | 2240 | if (pending_irqs && main_irq_cause != 0xffffffffU) { |
2228 | if (unlikely((pending_irqs & PCI_ERR) && HAS_PCI(host))) | 2241 | if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv))) |
2229 | handled = mv_pci_error(host, hpriv->base); | 2242 | handled = mv_pci_error(host, hpriv->base); |
2230 | else | 2243 | else |
2231 | handled = mv_host_intr(host, pending_irqs); | 2244 | handled = mv_host_intr(host, pending_irqs); |
@@ -2547,7 +2560,7 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
2547 | hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); | 2560 | hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); |
2548 | int fix_phy_mode4 = | 2561 | int fix_phy_mode4 = |
2549 | hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); | 2562 | hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0); |
2550 | u32 m2, tmp; | 2563 | u32 m2, m3; |
2551 | 2564 | ||
2552 | if (fix_phy_mode2) { | 2565 | if (fix_phy_mode2) { |
2553 | m2 = readl(port_mmio + PHY_MODE2); | 2566 | m2 = readl(port_mmio + PHY_MODE2); |
@@ -2564,28 +2577,36 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
2564 | udelay(200); | 2577 | udelay(200); |
2565 | } | 2578 | } |
2566 | 2579 | ||
2567 | /* who knows what this magic does */ | 2580 | /* |
2568 | tmp = readl(port_mmio + PHY_MODE3); | 2581 | * Gen-II/IIe PHY_MODE3 errata RM#2: |
2569 | tmp &= ~0x7F800000; | 2582 | * Achieves better receiver noise performance than the h/w default: |
2570 | tmp |= 0x2A800000; | 2583 | */ |
2571 | writel(tmp, port_mmio + PHY_MODE3); | 2584 | m3 = readl(port_mmio + PHY_MODE3); |
2572 | 2585 | m3 = (m3 & 0x1f) | (0x5555601 << 5); | |
2573 | if (fix_phy_mode4) { | ||
2574 | u32 m4; | ||
2575 | |||
2576 | m4 = readl(port_mmio + PHY_MODE4); | ||
2577 | |||
2578 | if (hp_flags & MV_HP_ERRATA_60X1B2) | ||
2579 | tmp = readl(port_mmio + PHY_MODE3); | ||
2580 | 2586 | ||
2581 | /* workaround for errata FEr SATA#10 (part 1) */ | 2587 | /* Guideline 88F5182 (GL# SATA-S11) */ |
2582 | m4 = (m4 & ~(1 << 1)) | (1 << 0); | 2588 | if (IS_SOC(hpriv)) |
2589 | m3 &= ~0x1c; | ||
2583 | 2590 | ||
2591 | if (fix_phy_mode4) { | ||
2592 | u32 m4 = readl(port_mmio + PHY_MODE4); | ||
2593 | /* | ||
2594 | * Enforce reserved-bit restrictions on GenIIe devices only. | ||
2595 | * For earlier chipsets, force only the internal config field | ||
2596 | * (workaround for errata FEr SATA#10 part 1). | ||
2597 | */ | ||
2598 | if (IS_GEN_IIE(hpriv)) | ||
2599 | m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES; | ||
2600 | else | ||
2601 | m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE; | ||
2584 | writel(m4, port_mmio + PHY_MODE4); | 2602 | writel(m4, port_mmio + PHY_MODE4); |
2585 | |||
2586 | if (hp_flags & MV_HP_ERRATA_60X1B2) | ||
2587 | writel(tmp, port_mmio + PHY_MODE3); | ||
2588 | } | 2603 | } |
2604 | /* | ||
2605 | * Workaround for 60x1-B2 errata SATA#13: | ||
2606 | * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3, | ||
2607 | * so we must always rewrite PHY_MODE3 after PHY_MODE4. | ||
2608 | */ | ||
2609 | writel(m3, port_mmio + PHY_MODE3); | ||
2589 | 2610 | ||
2590 | /* Revert values of pre-emphasis and signal amps to the saved ones */ | 2611 | /* Revert values of pre-emphasis and signal amps to the saved ones */ |
2591 | m2 = readl(port_mmio + PHY_MODE2); | 2612 | m2 = readl(port_mmio + PHY_MODE2); |
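Editor's note: the rewritten mv6_phy_errata() above uses the PHY_MODE4_* constants added earlier in this patch: Gen-IIe parts have every reserved bit forced to its documented value, older parts have only the two-bit internal config field forced (FEr SATA#10 part 1), and PHY_MODE3 is always rewritten afterwards because a PHY_MODE4 write can corrupt it (60x1-B2 errata SATA#13). A standalone sketch of just the bit manipulation, using the mask values from the hunk:

	#include <stdio.h>

	/* Values copied from the PHY_MODE4_* additions earlier in this patch. */
	#define PHY_MODE4_CFG_MASK    0x00000003u
	#define PHY_MODE4_CFG_VALUE   0x00000001u
	#define PHY_MODE4_RSVD_ZEROS  0x5de3fffau
	#define PHY_MODE4_RSVD_ONES   0x00000005u

	static unsigned int fixup_phy_mode4(unsigned int m4, int is_gen_iie)
	{
		if (is_gen_iie)
			/* Gen-IIe: clear always-zero bits, set always-one bits */
			return (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
		/* older chips: force only the internal config field */
		return (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
	}

	int main(void)
	{
		unsigned int m4 = 0xdeadbeefu;   /* arbitrary readback value */

		printf("gen-iie: 0x%08x\n", fixup_phy_mode4(m4, 1));
		printf("older  : 0x%08x\n", fixup_phy_mode4(m4, 0));
		/* caller must rewrite PHY_MODE3 after any PHY_MODE4 write */
		return 0;
	}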
@@ -2876,7 +2897,7 @@ static unsigned int mv_in_pcix_mode(struct ata_host *host) | |||
2876 | void __iomem *mmio = hpriv->base; | 2897 | void __iomem *mmio = hpriv->base; |
2877 | u32 reg; | 2898 | u32 reg; |
2878 | 2899 | ||
2879 | if (!HAS_PCI(host) || !IS_PCIE(hpriv)) | 2900 | if (IS_SOC(hpriv) || !IS_PCIE(hpriv)) |
2880 | return 0; /* not PCI-X capable */ | 2901 | return 0; /* not PCI-X capable */ |
2881 | reg = readl(mmio + MV_PCI_MODE_OFS); | 2902 | reg = readl(mmio + MV_PCI_MODE_OFS); |
2882 | if ((reg & MV_PCI_MODE_MASK) == 0) | 2903 | if ((reg & MV_PCI_MODE_MASK) == 0) |
@@ -3003,10 +3024,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
3003 | hp_flags |= MV_HP_CUT_THROUGH; | 3024 | hp_flags |= MV_HP_CUT_THROUGH; |
3004 | 3025 | ||
3005 | switch (pdev->revision) { | 3026 | switch (pdev->revision) { |
3006 | case 0x0: | 3027 | case 0x2: /* Rev.B0: the first/only public release */ |
3007 | hp_flags |= MV_HP_ERRATA_XX42A0; | ||
3008 | break; | ||
3009 | case 0x1: | ||
3010 | hp_flags |= MV_HP_ERRATA_60X1C0; | 3028 | hp_flags |= MV_HP_ERRATA_60X1C0; |
3011 | break; | 3029 | break; |
3012 | default: | 3030 | default: |
@@ -3018,7 +3036,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx) | |||
3018 | break; | 3036 | break; |
3019 | case chip_soc: | 3037 | case chip_soc: |
3020 | hpriv->ops = &mv_soc_ops; | 3038 | hpriv->ops = &mv_soc_ops; |
3021 | hp_flags |= MV_HP_ERRATA_60X1C0; | 3039 | hp_flags |= MV_HP_FLAG_SOC | MV_HP_ERRATA_60X1C0; |
3022 | break; | 3040 | break; |
3023 | 3041 | ||
3024 | default: | 3042 | default: |
@@ -3062,12 +3080,12 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) | |||
3062 | if (rc) | 3080 | if (rc) |
3063 | goto done; | 3081 | goto done; |
3064 | 3082 | ||
3065 | if (HAS_PCI(host)) { | 3083 | if (IS_SOC(hpriv)) { |
3066 | hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS; | ||
3067 | hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS; | ||
3068 | } else { | ||
3069 | hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS; | 3084 | hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS; |
3070 | hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS; | 3085 | hpriv->main_irq_mask_addr = mmio + SOC_HC_MAIN_IRQ_MASK_OFS; |
3086 | } else { | ||
3087 | hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS; | ||
3088 | hpriv->main_irq_mask_addr = mmio + PCI_HC_MAIN_IRQ_MASK_OFS; | ||
3071 | } | 3089 | } |
3072 | 3090 | ||
3073 | /* global interrupt mask: 0 == mask everything */ | 3091 | /* global interrupt mask: 0 == mask everything */ |
@@ -3093,7 +3111,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) | |||
3093 | mv_port_init(&ap->ioaddr, port_mmio); | 3111 | mv_port_init(&ap->ioaddr, port_mmio); |
3094 | 3112 | ||
3095 | #ifdef CONFIG_PCI | 3113 | #ifdef CONFIG_PCI |
3096 | if (HAS_PCI(host)) { | 3114 | if (!IS_SOC(hpriv)) { |
3097 | unsigned int offset = port_mmio - mmio; | 3115 | unsigned int offset = port_mmio - mmio; |
3098 | ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); | 3116 | ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio"); |
3099 | ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); | 3117 | ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port"); |
@@ -3113,7 +3131,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) | |||
3113 | writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); | 3131 | writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS); |
3114 | } | 3132 | } |
3115 | 3133 | ||
3116 | if (HAS_PCI(host)) { | 3134 | if (!IS_SOC(hpriv)) { |
3117 | /* Clear any currently outstanding host interrupt conditions */ | 3135 | /* Clear any currently outstanding host interrupt conditions */ |
3118 | writelfl(0, mmio + hpriv->irq_cause_ofs); | 3136 | writelfl(0, mmio + hpriv->irq_cause_ofs); |
3119 | 3137 | ||
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index 8ee6b5b4ede7..84ffcc26a74b 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c | |||
@@ -370,6 +370,7 @@ static const struct pci_device_id sil24_pci_tbl[] = { | |||
370 | { PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 }, | 370 | { PCI_VDEVICE(INTEL, 0x3124), BID_SIL3124 }, |
371 | { PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 }, | 371 | { PCI_VDEVICE(CMD, 0x3132), BID_SIL3132 }, |
372 | { PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 }, | 372 | { PCI_VDEVICE(CMD, 0x0242), BID_SIL3132 }, |
373 | { PCI_VDEVICE(CMD, 0x0244), BID_SIL3132 }, | ||
373 | { PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 }, | 374 | { PCI_VDEVICE(CMD, 0x3131), BID_SIL3131 }, |
374 | { PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 }, | 375 | { PCI_VDEVICE(CMD, 0x3531), BID_SIL3131 }, |
375 | 376 | ||
diff --git a/drivers/ata/sata_uli.c b/drivers/ata/sata_uli.c index f277cea904ce..db529b849948 100644 --- a/drivers/ata/sata_uli.c +++ b/drivers/ata/sata_uli.c | |||
@@ -83,6 +83,7 @@ static struct ata_port_operations uli_ops = { | |||
83 | .inherits = &ata_bmdma_port_ops, | 83 | .inherits = &ata_bmdma_port_ops, |
84 | .scr_read = uli_scr_read, | 84 | .scr_read = uli_scr_read, |
85 | .scr_write = uli_scr_write, | 85 | .scr_write = uli_scr_write, |
86 | .hardreset = ATA_OP_NULL, | ||
86 | }; | 87 | }; |
87 | 88 | ||
88 | static const struct ata_port_info uli_port_info = { | 89 | static const struct ata_port_info uli_port_info = { |