author    Mark Lord <liml@rtr.ca>        2008-01-26 18:31:16 -0500
committer Jeff Garzik <jeff@garzik.org>  2008-02-01 11:29:46 -0500
commit    0c58912e192fc3a4835d772aafa40b72552b819f
tree      46778fee4c3f7edcff8b0ffb5eb6b1a582d87418  drivers/ata/sata_mv.c
parent    f630d562829fcd8160a118f98c1e5b9cdb4e703e
sata_mv ncq Fix EDMA configuration
Simplify and fix the EDMA configuration setup to match the Marvell specifications. The chip documentation gives a specific (re)init sequence, which we now follow.

Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
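As a reading aid before the hunks, the (re)init sequence that the patched mv_start_dma() performs on an idle port can be condensed as below. The numbered step comments are editorial; every statement and identifier (EDMA_ERR_IRQ_CAUSE_OFS, HC_IRQ_CAUSE_OFS, DEV_IRQ, CRPB_DMA_DONE, SATA_FIS_IRQ_CAUSE_OFS, mv_edma_cfg, mv_set_edma_ptrs, EDMA_EN) is taken from the diff that follows.

	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
		struct mv_host_priv *hpriv = ap->host->private_data;
		int hard_port = mv_hardport_from_port(ap->port_no);
		void __iomem *hc_mmio = mv_hc_base_from_port(
				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
		u32 hc_irq_cause, ipending;

		/* step 1: clear stale EDMA error events */
		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);

		/* step 2: ack pending device/CRPB-done bits for this port */
		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
		ipending = (DEV_IRQ << hard_port) |
				(CRPB_DMA_DONE << hard_port);
		if (hc_irq_cause & ipending)
			writelfl(hc_irq_cause & ~ipending,
				 hc_mmio + HC_IRQ_CAUSE_OFS);

		/* step 3: (re)write the EDMA configuration register */
		mv_edma_cfg(ap, hpriv, port_mmio);

		/* step 4: clear stale SATA FIS causes (new 0x364 register) */
		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);

		/* step 5: program queue pointers, then enable EDMA */
		mv_set_edma_ptrs(port_mmio, hpriv, pp);
		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
	}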
Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--	drivers/ata/sata_mv.c	54
1 file changed, 34 insertions(+), 20 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 298f17d6e12c..f117f6a01676 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -210,6 +210,7 @@ enum {
 	/* SATA registers */
 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
 	SATA_ACTIVE_OFS		= 0x350,
+	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
 	PHY_MODE3		= 0x310,
 	PHY_MODE4		= 0x314,
 	PHY_MODE2		= 0x330,
@@ -222,11 +223,11 @@ enum {
 
 	/* Port registers */
 	EDMA_CFG_OFS		= 0,
-	EDMA_CFG_Q_DEPTH	= 0,		/* queueing disabled */
-	EDMA_CFG_NCQ		= (1 << 5),
+	EDMA_CFG_Q_DEPTH	= 0x1f,		/* max device queue depth */
+	EDMA_CFG_NCQ		= (1 << 5),	/* for R/W FPDMA queued */
 	EDMA_CFG_NCQ_GO_ON_ERR	= (1 << 14),	/* continue on error */
 	EDMA_CFG_RD_BRST_EXT	= (1 << 11),	/* read burst 512B */
 	EDMA_CFG_WR_BUFF_LEN	= (1 << 13),	/* write buffer 512B */
 
 	EDMA_ERR_IRQ_CAUSE_OFS	= 0x8,
 	EDMA_ERR_IRQ_MASK_OFS	= 0xc,
@@ -470,6 +471,8 @@ static void mv6_reset_flash(struct mv_host_priv *hpriv, void __iomem *mmio);
 static void mv_reset_pci_bus(struct pci_dev *pdev, void __iomem *mmio);
 static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 			     unsigned int port_no);
+static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
+			void __iomem *port_mmio);
 
 static struct scsi_host_template mv5_sht = {
 	.module			= THIS_MODULE,
@@ -834,13 +837,33 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
  * LOCKING:
  * Inherited from caller.
  */
-static void mv_start_dma(void __iomem *port_mmio, struct mv_host_priv *hpriv,
+static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 			 struct mv_port_priv *pp)
 {
 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
+		struct mv_host_priv *hpriv = ap->host->private_data;
+		int hard_port = mv_hardport_from_port(ap->port_no);
+		void __iomem *hc_mmio = mv_hc_base_from_port(
+				ap->host->iomap[MV_PRIMARY_BAR], hard_port);
+		u32 hc_irq_cause, ipending;
+
 		/* clear EDMA event indicators, if any */
 		writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
+		/* clear EDMA interrupt indicator, if any */
+		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
+		ipending = (DEV_IRQ << hard_port) |
+				(CRPB_DMA_DONE << hard_port);
+		if (hc_irq_cause & ipending) {
+			writelfl(hc_irq_cause & ~ipending,
+				 hc_mmio + HC_IRQ_CAUSE_OFS);
+		}
+
+		mv_edma_cfg(ap, hpriv, port_mmio);
+
+		/* clear FIS IRQ Cause */
+		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+
 		mv_set_edma_ptrs(port_mmio, hpriv, pp);
 
 		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
@@ -1025,30 +1048,22 @@ static int mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
 			void __iomem *port_mmio)
 {
-	u32 cfg = readl(port_mmio + EDMA_CFG_OFS);
+	u32 cfg;
 
 	/* set up non-NCQ EDMA configuration */
-	cfg &= ~(1 << 9);	/* disable eQue */
+	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */
 
-	if (IS_GEN_I(hpriv)) {
-		cfg &= ~0x1f;		/* clear queue depth */
+	if (IS_GEN_I(hpriv))
 		cfg |= (1 << 8);	/* enab config burst size mask */
-	}
 
-	else if (IS_GEN_II(hpriv)) {
-		cfg &= ~0x1f;		/* clear queue depth */
+	else if (IS_GEN_II(hpriv))
 		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;
-		cfg &= ~(EDMA_CFG_NCQ | EDMA_CFG_NCQ_GO_ON_ERR); /* clear NCQ */
-	}
 
 	else if (IS_GEN_IIE(hpriv)) {
 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
-		cfg &= ~(1 << 19);	/* dis 128-entry queue (for now?) */
 		cfg |= (1 << 18);	/* enab early completion */
 		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
-		cfg &= ~(1 << 16);	/* dis FIS-based switching (for now) */
-		cfg &= ~(EDMA_CFG_NCQ);	/* clear NCQ */
 	}
 
 	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
@@ -1370,7 +1385,6 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 	struct ata_port *ap = qc->ap;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct mv_port_priv *pp = ap->private_data;
-	struct mv_host_priv *hpriv = ap->host->private_data;
 	u32 in_index;
 
 	if (qc->tf.protocol != ATA_PROT_DMA) {
@@ -1382,7 +1396,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 		return ata_qc_issue_prot(qc);
 	}
 
-	mv_start_dma(port_mmio, hpriv, pp);
+	mv_start_dma(ap, port_mmio, pp);
 
 	in_index = pp->req_idx & MV_MAX_Q_DEPTH_MASK;
 
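For reference, mv_edma_cfg() as it reads once the patch is applied, reassembled from the @@ -1025,30 +1048,22 @@ hunk above with the diff markers dropped; every line below appears in that hunk, nothing has been added:

static void mv_edma_cfg(struct ata_port *ap, struct mv_host_priv *hpriv,
			void __iomem *port_mmio)
{
	u32 cfg;

	/* set up non-NCQ EDMA configuration */
	cfg = EDMA_CFG_Q_DEPTH;		/* always 0x1f for *all* chips */

	if (IS_GEN_I(hpriv))
		cfg |= (1 << 8);	/* enab config burst size mask */

	else if (IS_GEN_II(hpriv))
		cfg |= EDMA_CFG_RD_BRST_EXT | EDMA_CFG_WR_BUFF_LEN;

	else if (IS_GEN_IIE(hpriv)) {
		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
		cfg |= (1 << 18);	/* enab early completion */
		cfg |= (1 << 17);	/* enab cut-through (dis stor&forwrd) */
	}

	writelfl(cfg, port_mmio + EDMA_CFG_OFS);
}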