author	Mark Lord <liml@rtr.ca>	2008-01-26 18:31:33 -0500
committer	Jeff Garzik <jeff@garzik.org>	2008-02-01 11:29:47 -0500
commit	721091685f853ba4e6c49f26f989db0b1a811250 (patch)
tree	ef71c60ad844419ffc3f19c330a64fa6cd36fff8 /drivers
parent	0c58912e192fc3a4835d772aafa40b72552b819f (diff)
sata_mv ncq Add want ncq parameter for EDMA configuration
An extra EDMA config bit is required for NCQ operation.
So set/clear it as needed, and cache current setting in port_priv.
For now though, it will always be "off" (0).

Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/ata/sata_mv.c	31
1 file changed, 23 insertions(+), 8 deletions(-)
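
The commit message is terse, so here is a minimal standalone sketch of the logic being added. It is not sata_mv code: struct ata_port and the MMIO accesses are replaced by a plain struct, the EDMA_CFG_NCQ bit value shown is illustrative, and stopping/restarting EDMA is reduced to flag updates; only the want_ncq / MV_PP_FLAG_NCQ_EN handling mirrors the patch below.

#include <stdio.h>

enum {
	EDMA_CFG_NCQ		= (1 << 5),	/* NCQ enable bit in the EDMA config (value illustrative) */
	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
};

struct port_priv {
	unsigned int pp_flags;	/* stands in for pp->pp_flags in the driver */
	unsigned int edma_cfg;	/* stands in for the EDMA_CFG register */
};

/* Mirrors the new mv_edma_cfg(): set or clear the NCQ bit and cache the mode. */
static void edma_cfg(struct port_priv *pp, int want_ncq)
{
	unsigned int cfg = pp->edma_cfg & ~EDMA_CFG_NCQ;

	if (want_ncq) {
		cfg |= EDMA_CFG_NCQ;
		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
	} else
		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;

	pp->edma_cfg = cfg;	/* writelfl(cfg, ...) in the real driver */
}

/* Mirrors the new mv_start_dma(): restart EDMA if the NCQ mode has to change. */
static void start_dma(struct port_priv *pp, int want_ncq)
{
	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
		if (want_ncq != using_ncq)
			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;	/* __mv_stop_dma(ap) */
	}
	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		edma_cfg(pp, want_ncq);
		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;	/* EDMA (re)started */
	}
}

int main(void)
{
	struct port_priv pp = { 0, 0 };

	start_dma(&pp, 0);	/* plain DMA command: NCQ bit stays clear */
	start_dma(&pp, 1);	/* NCQ command: EDMA restarted with EDMA_CFG_NCQ set */
	printf("pp_flags=%#x edma_cfg=%#x\n", pp.pp_flags, pp.edma_cfg);
	return 0;
}

In the driver itself, mv_qc_issue() derives want_ncq from qc->tf.protocol (want_ncq = (protocol == ATA_PROT_NCQ)) and mv_port_start() passes 0, so, as the commit message notes, the NCQ bit stays off for now.
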
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index f117f6a01676..32a0ace5234a 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -331,6 +331,7 @@ enum {
 
 	/* Port private flags (pp_flags) */
 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
+	MV_PP_FLAG_NCQ_EN	= (1 << 1),	/* is EDMA set up for NCQ? */
 	MV_PP_FLAG_HAD_A_RESET	= (1 << 2),	/* 1st hard reset complete? */
 };
 
@@ -471,8 +472,9 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 			unsigned int port_no);
-static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
-			void __iomem *port_mmio);
+static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
+			void __iomem *port_mmio, int want_ncq);
+static int __mv_stop_dma(struct ata_port *ap);
 
 static struct scsi_host_template mv5_sht = {
 	.module			= THIS_MODULE,
@@ -838,8 +840,15 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
  *      Inherited from caller.
  */
 static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
-			 struct mv_port_priv *pp)
+			 struct mv_port_priv *pp, u8 protocol)
 {
+	int want_ncq = (protocol == ATA_PROT_NCQ);
+
+	if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
+		int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0);
+		if (want_ncq != using_ncq)
+			__mv_stop_dma(ap);
+	}
 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
 		struct mv_host_priv *hpriv = ap->host->private_data;
 		int hard_port = mv_hardport_from_port(ap->port_no);
@@ -859,7 +868,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 			hc_mmio + HC_IRQ_CAUSE_OFS);
 	}
 
-	mv_edma_cfg(ap, hpriv, port_mmio);
+	mv_edma_cfg(pp, hpriv, port_mmio, want_ncq);
 
 	/* clear FIS IRQ Cause */
 	writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
@@ -1045,8 +1054,8 @@ static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 		return -EINVAL;
 }
 
-static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
-			void __iomem *port_mmio)
+static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv,
+			void __iomem *port_mmio, int want_ncq)
 {
 	u32 cfg;
 
@@ -1066,6 +1075,12 @@ static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
 	}
 
+	if (want_ncq) {
+		cfg |= EDMA_CFG_NCQ;
+		pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
+	} else
+		pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
+
 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
 }
 
@@ -1128,7 +1143,7 @@ static int mv_port_start(struct ata_port *ap)
 
 	spin_lock_irqsave(&ap->host->lock, flags);
 
-	mv_edma_cfg(ap, hpriv, port_mmio);
+	mv_edma_cfg(pp, hpriv, port_mmio, 0);
 
 	mv_set_edma_ptrs(port_mmio, hpriv, pp);
 
@@ -1396,7 +1411,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 		return ata_qc_issue_prot(qc);
 	}
 
-	mv_start_dma(ap, port_mmio, pp);
+	mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
 
 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
 