author     Mark Lord <liml@rtr.ca>            2009-01-30 18:51:54 -0500
committer  Jeff Garzik <jgarzik@redhat.com>   2009-03-24 22:02:38 -0400
commit     da14265e776f35067045b8555b5f5f7521e50bc4
tree       c89dd1d0132b1e1443f8a6f60e99036d7a354aba /drivers/ata/sata_mv.c
parent     32cd11a61007511ddb38783deec8bb1aa6735789
sata_mv: introduce support for ATAPI devices
Add ATAPI support to sata_mv, using sff DMA for GEN_II chipsets,
and plain old PIO for GEN_IIE.

Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
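Taken together, the two new hooks gate ATAPI DMA in two stages: ->mode_filter strips the MWDMA/UDMA modes from ATAPI devices on anything other than GEN_II chips, and ->check_atapi_dma then vetoes DMA per command whenever the byte count is not known in advance, so libata falls back to PIO for those commands. The sketch below is illustrative only, not code from the patch; the helper name and its boolean parameters are invented for the example.

/* Illustrative only -- not part of this patch. */
static int mv_atapi_cmd_may_use_dma(int chip_is_gen_ii, int byte_count_known)
{
        if (!chip_is_gen_ii)
                return 0;       /* GEN_IIE and others: plain old PIO */
        if (!byte_count_known)
                return 0;       /* bmdma can't cope with byte-count under/overflow */
        return 1;               /* GEN_II fixed-size transfers go through sff bmdma */
}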
Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--   drivers/ata/sata_mv.c   190
1 file changed, 186 insertions(+), 4 deletions(-)
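One detail worth calling out before the diff: in mv_bmdma_setup() below, the 64-bit PRD table address is programmed as two 32-bit writes, and the upper half is computed as (addr >> 16) >> 16 rather than addr >> 32. That idiom matters because dma_addr_t can be a 32-bit type on some configurations, where a shift by 32 would be undefined and draw a compiler warning. A self-contained sketch, with helper names invented here rather than taken from sata_mv.c:

#include <linux/types.h>

/* Split a dma_addr_t across a pair of 32-bit PRD address registers.
 * The double 16-bit shift yields 0 when dma_addr_t is only 32 bits
 * wide, and the true upper half when it is 64 bits wide.
 */
static inline u32 prd_addr_high(dma_addr_t addr)
{
        return (addr >> 16) >> 16;      /* upper 32 bits, or 0 */
}

static inline u32 prd_addr_low(dma_addr_t addr)
{
        return addr & 0xffffffffUL;     /* lower 32 bits */
}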
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 9c8ea2c1116d..6f8a49bc4521 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -31,8 +31,6 @@
  *
  * --> Complete a full errata audit for all chipsets to identify others.
  *
- * --> ATAPI support (Marvell claims the 60xx/70xx chips can do it).
- *
  * --> Develop a low-power-consumption strategy, and implement it.
  *
  * --> [Experiment, low priority] Investigate interrupt coalescing.
@@ -68,7 +66,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME        "sata_mv"
-#define DRV_VERSION     "1.25"
+#define DRV_VERSION     "1.26"
 
 enum {
         /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -126,7 +124,7 @@ enum {
 
         MV_GEN_II_FLAGS = MV_COMMON_FLAGS | MV_FLAG_IRQ_COALESCE |
                           ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
-                          ATA_FLAG_NCQ | ATA_FLAG_NO_ATAPI,
+                          ATA_FLAG_NCQ,
 
         MV_GEN_IIE_FLAGS = MV_GEN_II_FLAGS | ATA_FLAG_AN,
 
@@ -348,6 +346,12 @@ enum {
 
         EDMA_HALTCOND_OFS       = 0x60,         /* GenIIe halt conditions */
 
+
+        BMDMA_CMD_OFS           = 0x224,        /* bmdma command register */
+        BMDMA_STATUS_OFS        = 0x228,        /* bmdma status register */
+        BMDMA_PRD_LOW_OFS       = 0x22c,        /* bmdma PRD addr 31:0 */
+        BMDMA_PRD_HIGH_OFS      = 0x230,        /* bmdma PRD addr 63:32 */
+
         /* Host private flags (hp_flags) */
         MV_HP_FLAG_MSI          = (1 << 0),
         MV_HP_ERRATA_50XXB0     = (1 << 1),
@@ -547,6 +551,15 @@ static void mv_pmp_error_handler(struct ata_port *ap);
 static void mv_process_crpb_entries(struct ata_port *ap,
                                         struct mv_port_priv *pp);
 
+static unsigned long mv_mode_filter(struct ata_device *dev,
+                                    unsigned long xfer_mask);
+static void mv_sff_irq_clear(struct ata_port *ap);
+static int mv_check_atapi_dma(struct ata_queued_cmd *qc);
+static void mv_bmdma_setup(struct ata_queued_cmd *qc);
+static void mv_bmdma_start(struct ata_queued_cmd *qc);
+static void mv_bmdma_stop(struct ata_queued_cmd *qc);
+static u8   mv_bmdma_status(struct ata_port *ap);
+
 /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below
  * because we have to allow room for worst case splitting of
  * PRDs for 64K boundaries in mv_fill_sg().
@@ -594,6 +607,14 @@ static struct ata_port_operations mv6_ops = {
         .pmp_softreset          = mv_softreset,
         .softreset              = mv_softreset,
         .error_handler          = mv_pmp_error_handler,
+
+        .sff_irq_clear          = mv_sff_irq_clear,
+        .check_atapi_dma        = mv_check_atapi_dma,
+        .bmdma_setup            = mv_bmdma_setup,
+        .bmdma_start            = mv_bmdma_start,
+        .bmdma_stop             = mv_bmdma_stop,
+        .bmdma_status           = mv_bmdma_status,
+        .mode_filter            = mv_mode_filter,
 };
 
 static struct ata_port_operations mv_iie_ops = {
@@ -1393,6 +1414,167 @@ static void mv_crqb_pack_cmd(__le16 *cmdw, u8 data, u8 addr, unsigned last)
 }
 
 /**
+ * mv_mode_filter - Allow ATAPI DMA only on GenII chips.
+ * @dev: device whose xfer modes are being configured.
+ *
+ * Only the GenII hardware can use DMA with ATAPI drives.
+ */
+static unsigned long mv_mode_filter(struct ata_device *adev,
+                                    unsigned long xfer_mask)
+{
+        if (adev->class == ATA_DEV_ATAPI) {
+                struct mv_host_priv *hpriv = adev->link->ap->host->private_data;
+                if (!IS_GEN_II(hpriv)) {
+                        xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
+                        ata_dev_printk(adev, KERN_INFO,
+                                "ATAPI DMA not supported on this chipset\n");
+                }
+        }
+        return xfer_mask;
+}
+
+/**
+ * mv_sff_irq_clear - Clear hardware interrupt after DMA.
+ * @ap: Port associated with this ATA transaction.
+ *
+ * We need this only for ATAPI bmdma transactions,
+ * as otherwise we experience spurious interrupts
+ * after libata-sff handles the bmdma interrupts.
+ */
+static void mv_sff_irq_clear(struct ata_port *ap)
+{
+        mv_clear_and_enable_port_irqs(ap, mv_ap_base(ap), ERR_IRQ);
+}
+
+/**
+ * mv_check_atapi_dma - Filter ATAPI cmds which are unsuitable for DMA.
+ * @qc: queued command to check for chipset/DMA compatibility.
+ *
+ * The bmdma engines cannot handle speculative data sizes
+ * (bytecount under/over flow).  So only allow DMA for
+ * data transfer commands with known data sizes.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static int mv_check_atapi_dma(struct ata_queued_cmd *qc)
+{
+        struct scsi_cmnd *scmd = qc->scsicmd;
+
+        if (scmd) {
+                switch (scmd->cmnd[0]) {
+                case READ_6:
+                case READ_10:
+                case READ_12:
+                case WRITE_6:
+                case WRITE_10:
+                case WRITE_12:
+                case GPCMD_READ_CD:
+                case GPCMD_SEND_DVD_STRUCTURE:
+                case GPCMD_SEND_CUE_SHEET:
+                        return 0; /* DMA is safe */
+                }
+        }
+        return -EOPNOTSUPP; /* use PIO instead */
+}
+
+/**
+ * mv_bmdma_setup - Set up BMDMA transaction
+ * @qc: queued command to prepare DMA for.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_bmdma_setup(struct ata_queued_cmd *qc)
+{
+        struct ata_port *ap = qc->ap;
+        void __iomem *port_mmio = mv_ap_base(ap);
+        struct mv_port_priv *pp = ap->private_data;
+
+        mv_fill_sg(qc);
+
+        /* clear all DMA cmd bits */
+        writel(0, port_mmio + BMDMA_CMD_OFS);
+
+        /* load PRD table addr. */
+        writel((pp->sg_tbl_dma[qc->tag] >> 16) >> 16,
+                port_mmio + BMDMA_PRD_HIGH_OFS);
+        writelfl(pp->sg_tbl_dma[qc->tag],
+                port_mmio + BMDMA_PRD_LOW_OFS);
+
+        /* issue r/w command */
+        ap->ops->sff_exec_command(ap, &qc->tf);
+}
+
+/**
+ * mv_bmdma_start - Start a BMDMA transaction
+ * @qc: queued command to start DMA on.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_bmdma_start(struct ata_queued_cmd *qc)
+{
+        struct ata_port *ap = qc->ap;
+        void __iomem *port_mmio = mv_ap_base(ap);
+        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
+        u32 cmd = (rw ? 0 : ATA_DMA_WR) | ATA_DMA_START;
+
+        /* start host DMA transaction */
+        writelfl(cmd, port_mmio + BMDMA_CMD_OFS);
+}
+
+/**
+ * mv_bmdma_stop - Stop BMDMA transfer
+ * @qc: queued command to stop DMA on.
+ *
+ * Clears the ATA_DMA_START flag in the bmdma control register
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static void mv_bmdma_stop(struct ata_queued_cmd *qc)
+{
+        struct ata_port *ap = qc->ap;
+        void __iomem *port_mmio = mv_ap_base(ap);
+        u32 cmd;
+
+        /* clear start/stop bit */
+        cmd = readl(port_mmio + BMDMA_CMD_OFS);
+        cmd &= ~ATA_DMA_START;
+        writelfl(cmd, port_mmio + BMDMA_CMD_OFS);
+
+        /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
+        ata_sff_dma_pause(ap);
+}
+
+/**
+ * mv_bmdma_status - Read BMDMA status
+ * @ap: port for which to retrieve DMA status.
+ *
+ * Read and return equivalent of the sff BMDMA status register.
+ *
+ * LOCKING:
+ * Inherited from caller.
+ */
+static u8 mv_bmdma_status(struct ata_port *ap)
+{
+        void __iomem *port_mmio = mv_ap_base(ap);
+        u32 reg, status;
+
+        /*
+         * Other bits are valid only if ATA_DMA_ACTIVE==0,
+         * and the ATA_DMA_INTR bit doesn't exist.
+         */
+        reg = readl(port_mmio + BMDMA_STATUS_OFS);
+        if (reg & ATA_DMA_ACTIVE)
+                status = ATA_DMA_ACTIVE;
+        else
+                status = (reg & ATA_DMA_ERR) | ATA_DMA_INTR;
+        return status;
+}
+
+/**
  * mv_qc_prep - Host specific command preparation.
  * @qc: queued command to prepare
  *