author		Mark Lord <liml@rtr.ca>			2008-04-19 15:06:40 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-04-25 01:25:48 -0400
commit		fcfb1f77cea81f74d865b4d33f2e452ffa1973e8
tree		eea4215c354333cbbab55a608d44b7a3aca59dce /drivers/ata
parent		1cfd19aeb8c8b6291a9d11143b4d8f3dac508ed4
sata_mv: simplify request/response queue handling
Try to simplify handling of the request/response queues.
Maintain the cached copies of the queue indexes in a fully-masked state,
rather than masking them at each point of use.
Split off handling of a single crpb response into a separate function,
to reduce complexity in the main mv_process_crpb_entries() routine.
Ignore the rarely-valid error bits from the crpb status field,
as we already handle that information in mv_err_intr().
For now, preserve the rest of the original logic.
A later patch will deal with fixing that separately.
Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
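
The core idiom the patch adopts, shown in isolation: keep a ring index
permanently masked to the queue depth, so every reader can use it directly.
Below is a minimal standalone sketch of that idiom (names are made up for
this example; the driver itself uses MV_MAX_Q_DEPTH_MASK and the per-port
req_idx/resp_idx fields):

/*
 * Illustrative sketch only -- not code from sata_mv.c.
 * The depth must be a power of two for the mask to act as a modulus.
 */
#define QUEUE_DEPTH	32
#define DEPTH_MASK	(QUEUE_DEPTH - 1)

struct ring {
	unsigned int idx;		/* invariant: always < QUEUE_DEPTH */
	void *slot[QUEUE_DEPTH];
};

/* Old style: the index grows without bound, so every use must mask it. */
static void *peek_old(struct ring *r)
{
	return r->slot[r->idx & DEPTH_MASK];
}

/* New style: mask once, at the only place the index changes... */
static void advance(struct ring *r)
{
	r->idx = (r->idx + 1) & DEPTH_MASK;
}

/* ...so every use can index directly. */
static void *peek_new(struct ring *r)
{
	return r->slot[r->idx];
}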
Diffstat (limited to 'drivers/ata')
-rw-r--r--	drivers/ata/sata_mv.c	109
1 file changed, 56 insertions(+), 53 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 552006853cd7..cee78f9e9d1b 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -804,7 +804,8 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
 	/*
 	 * initialize request queue
 	 */
-	index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
+	pp->req_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
+	index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
 
 	WARN_ON(pp->crqb_dma & 0x3ff);
 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
@@ -820,7 +821,8 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
 	/*
 	 * initialize response queue
 	 */
-	index = (pp->resp_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_RSP_Q_PTR_SHIFT;
+	pp->resp_idx &= MV_MAX_Q_DEPTH_MASK;	/* paranoia */
+	index = pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT;
 
 	WARN_ON(pp->crpb_dma & 0xff);
 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
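
For context on the two hunks above: the value written to each EDMA pointer
register packs the shifted index into the low bits of the queue's bus
address, which is why the WARN_ONs check alignment (0x3ff implies 1 KB
alignment for the request ring, 0xff implies 256-byte alignment for the
response ring). A sketch of the composition for the request side, assuming
the usual sata_mv geometry of 32 entries of 32-byte CRQBs (hence the shift
of 5); this is illustration, not driver code:

/*
 * Illustrative sketch: form the request-queue in-pointer register value.
 * Assumes 32-byte CRQBs and a 1 KB-aligned ring base.
 */
static u32 req_in_ptr_value(u32 crqb_dma, unsigned int req_idx)
{
	/* base is 1 KB aligned, so bits 0-9 are free to hold the index */
	return (crqb_dma & ~0x3ffu) | (req_idx << 5);
}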
@@ -1312,7 +1314,7 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
 		flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
 
 	/* get current queue index from software */
-	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
+	in_index = pp->req_idx;
 
 	pp->crqb[in_index].sg_addr =
 		cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
@@ -1404,7 +1406,7 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc)
 		flags |= (qc->dev->link->pmp & 0xf) << CRQB_PMP_SHIFT;
 
 	/* get current queue index from software */
-	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
+	in_index = pp->req_idx;
 
 	crqb = (struct mv_crqb_iie *) &pp->crqb[in_index];
 	crqb->addr = cpu_to_le32(pp->sg_tbl_dma[qc->tag] & 0xffffffff);
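
Both mv_qc_prep() hunks lean on the invariant that req_idx is already
masked by the time it is read. If one wanted to make that assumption
explicit in the readers too (the patch does not), a debug guard might look
like:

	/* Hypothetical debug check, not part of the patch: assert the
	 * "fully-masked" invariant before indexing the CRQB ring. */
	WARN_ON(pp->req_idx & ~MV_MAX_Q_DEPTH_MASK);
	in_index = pp->req_idx;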
@@ -1471,9 +1473,8 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
 	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
 
-	pp->req_idx++;
-
-	in_index = (pp->req_idx & MV_MAX_Q_DEPTH_MASK) << EDMA_REQ_Q_PTR_SHIFT;
+	pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
+	in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;
 
 	/* and write the request in pointer to kick the EDMA to life */
 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | in_index,
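
A quick worked example of the new advance in mv_qc_issue(), with
MV_MAX_Q_DEPTH_MASK == 0x1f (MV_MAX_Q_DEPTH is 32 in this driver):

/* (30 + 1) & 0x1f == 31	-- normal advance
 * (31 + 1) & 0x1f == 0		-- wraps in place, never leaves 0..31
 * So req_idx can be used unmasked everywhere else, including the
 * "in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT" line above.
 */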
@@ -1607,70 +1608,72 @@ static void mv_intr_pio(struct ata_port *ap)
 	ata_qc_complete(qc);
 }
 
-static void mv_intr_edma(struct ata_port *ap)
+static void mv_process_crpb_response(struct ata_port *ap,
+		struct mv_crpb *response, unsigned int tag, int ncq_enabled)
+{
+	struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+
+	if (qc) {
+		u8 ata_status;
+		u16 edma_status = le16_to_cpu(response->flags);
+		/*
+		 * edma_status from a response queue entry:
+		 *   LSB is from EDMA_ERR_IRQ_CAUSE_OFS (non-NCQ only).
+		 *   MSB is saved ATA status from command completion.
+		 */
+		if (!ncq_enabled) {
+			u8 err_cause = edma_status & 0xff & ~EDMA_ERR_DEV;
+			if (err_cause) {
+				/*
+				 * Error will be seen/handled by mv_err_intr().
+				 * So do nothing at all here.
+				 */
+				return;
+			}
+		}
+		ata_status = edma_status >> CRPB_FLAG_STATUS_SHIFT;
+		qc->err_mask |= ac_err_mask(ata_status);
+		ata_qc_complete(qc);
+	} else {
+		ata_port_printk(ap, KERN_ERR, "%s: no qc for tag=%d\n",
+				__func__, tag);
+	}
+}
+
+static void mv_process_crpb_entries(struct ata_port *ap, struct mv_port_priv *pp)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct mv_host_priv *hpriv = ap->host->private_data;
-	struct mv_port_priv *pp = ap->private_data;
-	struct ata_queued_cmd *qc;
-	u32 out_index, in_index;
+	u32 in_index;
 	bool work_done = false;
+	int ncq_enabled = (pp->pp_flags & MV_PP_FLAG_NCQ_EN);
 
-	/* get h/w response queue pointer */
+	/* Get the hardware queue position index */
 	in_index = (readl(port_mmio + EDMA_RSP_Q_IN_PTR_OFS)
 			>> EDMA_RSP_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
 
-	while (1) {
-		u16 status;
+	/* Process new responses since the last time we looked */
+	while (in_index != pp->resp_idx) {
 		unsigned int tag;
+		struct mv_crpb *response = &pp->crpb[pp->resp_idx];
 
-		/* get s/w response queue last-read pointer, and compare */
-		out_index = pp->resp_idx & MV_MAX_Q_DEPTH_MASK;
-		if (in_index == out_index)
-			break;
+		pp->resp_idx = (pp->resp_idx + 1) & MV_MAX_Q_DEPTH_MASK;
 
-		/* 50xx: get active ATA command */
-		if (IS_GEN_I(hpriv))
+		if (IS_GEN_I(hpriv)) {
+			/* 50xx: no NCQ, only one command active at a time */
 			tag = ap->link.active_tag;
-
-		/* Gen II/IIE: get active ATA command via tag, to enable
-		 * support for queueing.  this works transparently for
-		 * queued and non-queued modes.
-		 */
-		else
-			tag = le16_to_cpu(pp->crpb[out_index].id) & 0x1f;
-
-		qc = ata_qc_from_tag(ap, tag);
-
-		/* For non-NCQ mode, the lower 8 bits of status
-		 * are from EDMA_ERR_IRQ_CAUSE_OFS,
-		 * which should be zero if all went well.
-		 */
-		status = le16_to_cpu(pp->crpb[out_index].flags);
-		if ((status & 0xff) && !(pp->pp_flags & MV_PP_FLAG_NCQ_EN)) {
-			mv_err_intr(ap, qc);
-			return;
-		}
-
-		/* and finally, complete the ATA command */
-		if (qc) {
-			qc->err_mask |=
-				ac_err_mask(status >> CRPB_FLAG_STATUS_SHIFT);
-			ata_qc_complete(qc);
+		} else {
+			/* Gen II/IIE: get command tag from CRPB entry */
+			tag = le16_to_cpu(response->id) & 0x1f;
 		}
-
-		/* advance software response queue pointer, to
-		 * indicate (after the loop completes) to hardware
-		 * that we have consumed a response queue entry.
-		 */
+		mv_process_crpb_response(ap, response, tag, ncq_enabled);
 		work_done = true;
-		pp->resp_idx++;
 	}
 
 	/* Update the software queue position index in hardware */
 	if (work_done)
 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
-			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
+			 (pp->resp_idx << EDMA_RSP_Q_PTR_SHIFT),
 			 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 }
 
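
The rewritten loop is the classic single-consumer ring pattern: read the
producer's (hardware's) in-pointer once, consume entries until the software
out-pointer catches up, then publish the new out-pointer. Reusing struct
ring from the sketch earlier, the shape is roughly as follows; read_hw_in(),
write_hw_out(), and handle_entry() are hypothetical stand-ins for the
EDMA_RSP_Q_IN_PTR_OFS read, the EDMA_RSP_Q_OUT_PTR_OFS write, and
mv_process_crpb_response():

/* Illustrative sketch of the consumer-loop shape; not driver code. */
static unsigned int read_hw_in(struct ring *r);
static void write_hw_out(struct ring *r, unsigned int idx);
static void handle_entry(void *entry);

static void consume_ring(struct ring *r)
{
	unsigned int hw_in = read_hw_in(r);	/* read producer index once */
	int work_done = 0;

	while (hw_in != r->idx) {
		void *entry = r->slot[r->idx];	/* capture before advancing */

		r->idx = (r->idx + 1) & DEPTH_MASK;
		handle_entry(entry);
		work_done = 1;
	}
	/* Publish the new out-pointer only if something was consumed */
	if (work_done)
		write_hw_out(r, r->idx);
}

As in the driver, the entry is captured before the index advances (response
is taken from pp->crpb[pp->resp_idx] before resp_idx moves on), and the
hardware register write is skipped when no entries were consumed.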
@@ -1748,7 +1751,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 
 	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
 		if ((DMA_IRQ << hardport) & hc_irq_cause)
-			mv_intr_edma(ap);
+			mv_process_crpb_entries(ap, pp);
 	} else {
 		if ((DEV_IRQ << hardport) & hc_irq_cause)
 			mv_intr_pio(ap);