author     Mark Lord <liml@rtr.ca>              2009-01-30 18:47:51 -0500
committer  Jeff Garzik <jgarzik@redhat.com>     2009-03-24 22:02:38 -0400
commit     00b81235aa0368f84c0e704bec4142cd8c516ad5
tree       fcb89d62f439d7233a926ff79800f598a68680e8   /drivers/ata
parent     91b1a84c10869e2e46a576e5367de3166bff8ecc
sata_mv: rearrange mv_start_dma() and friends
Rearrange mv_start_dma() and friends, in preparation for adding
non-EDMA DMA modes and non-EDMA interrupts to the driver.
Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/ata')
-rw-r--r--   drivers/ata/sata_mv.c | 66
1 file changed, 38 insertions(+), 28 deletions(-)
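For orientation, here is a condensed sketch of the EDMA start path as it looks after this patch, reconstructed from the hunks below (abridged, not a verbatim excerpt of sata_mv.c): the interrupt-cause clearing that used to sit inline in mv_start_dma() moves into the new helper mv_clear_and_enable_port_irqs(), the function itself is renamed mv_start_edma(), and mv_edma_cfg() gains a want_edma argument (passed as 1 here) so that a future non-EDMA path can share the same configuration code.

static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
                          struct mv_port_priv *pp, u8 protocol)
{
        int want_ncq = (protocol == ATA_PROT_NCQ);
        ...
        if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
                struct mv_host_priv *hpriv = ap->host->private_data;

                mv_edma_cfg(ap, want_ncq, 1);   /* third argument is want_edma */

                mv_set_edma_ptrs(port_mmio, hpriv, pp);
                /* clears EDMA/HC (and Gen-IIE FIS) irq-cause bits, then unmasks */
                mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ | ERR_IRQ);

                writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
                pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
        }
}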
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 3dc35543fb3d..fb3288bbd9fb 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -536,7 +536,7 @@ static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio,
                              unsigned int port_no);
 static int mv_stop_edma(struct ata_port *ap);
 static int mv_stop_edma_engine(void __iomem *port_mmio);
-static void mv_edma_cfg(struct ata_port *ap, int want_ncq);
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma);
 
 static void mv_pmp_select(struct ata_port *ap, int pmp);
 static int mv_pmp_hardreset(struct ata_link *link, unsigned int *class,
@@ -849,8 +849,32 @@ static void mv_enable_port_irqs(struct ata_port *ap,
         mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
 }
 
+static void mv_clear_and_enable_port_irqs(struct ata_port *ap,
+                                          void __iomem *port_mmio,
+                                          unsigned int port_irqs)
+{
+        struct mv_host_priv *hpriv = ap->host->private_data;
+        int hardport = mv_hardport_from_port(ap->port_no);
+        void __iomem *hc_mmio = mv_hc_base_from_port(
+                                mv_host_base(ap->host), ap->port_no);
+        u32 hc_irq_cause;
+
+        /* clear EDMA event indicators, if any */
+        writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+
+        /* clear pending irq events */
+        hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
+        writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
+
+        /* clear FIS IRQ Cause */
+        if (IS_GEN_IIE(hpriv))
+                writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+
+        mv_enable_port_irqs(ap, port_irqs);
+}
+
 /**
- * mv_start_dma - Enable eDMA engine
+ * mv_start_edma - Enable eDMA engine
  * @base: port base address
  * @pp: port private data
  *
@@ -860,7 +884,7 @@ static void mv_enable_port_irqs(struct ata_port *ap,
  * LOCKING:
  * Inherited from caller.
  */
-static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
+static void mv_start_edma(struct ata_port *ap, void __iomem *port_mmio,
                          struct mv_port_priv *pp, u8 protocol)
 {
         int want_ncq = (protocol == ATA_PROT_NCQ);
@@ -872,26 +896,11 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
         }
         if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
                 struct mv_host_priv *hpriv = ap->host->private_data;
-                int hardport = mv_hardport_from_port(ap->port_no);
-                void __iomem *hc_mmio = mv_hc_base_from_port(
-                                mv_host_base(ap->host), ap->port_no);
-                u32 hc_irq_cause;
-
-                /* clear EDMA event indicators, if any */
-                writelfl(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
-
-                /* clear pending irq events */
-                hc_irq_cause = ~((DEV_IRQ | DMA_IRQ) << hardport);
-                writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 
-                mv_edma_cfg(ap, want_ncq);
-
-                /* clear FIS IRQ Cause */
-                if (IS_GEN_IIE(hpriv))
-                        writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+                mv_edma_cfg(ap, want_ncq, 1);
 
                 mv_set_edma_ptrs(port_mmio, hpriv, pp);
-                mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);
+                mv_clear_and_enable_port_irqs(ap, port_mmio, DONE_IRQ|ERR_IRQ);
 
                 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
                 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
@@ -1173,7 +1182,7 @@ static void mv_60x1_errata_sata25(struct ata_port *ap, int want_ncq)
         writel(new, hpriv->base + MV_GPIO_PORT_CTL_OFS);
 }
 
-static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
+static void mv_edma_cfg(struct ata_port *ap, int want_ncq, int want_edma)
 {
         u32 cfg;
         struct mv_port_priv *pp = ap->private_data;
@@ -1182,7 +1191,7 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
 
         /* set up non-NCQ EDMA configuration */
         cfg = EDMA_CFG_Q_DEPTH;         /* always 0x1f for *all* chips */
-        pp->pp_flags &= ~MV_PP_FLAG_FBS_EN;
+        pp->pp_flags &= ~(MV_PP_FLAG_FBS_EN | MV_PP_FLAG_NCQ_EN);
 
         if (IS_GEN_I(hpriv))
                 cfg |= (1 << 8);        /* enab config burst size mask */
@@ -1211,9 +1220,11 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
                 }
 
                 cfg |= (1 << 23);       /* do not mask PM field in rx'd FIS */
-                cfg |= (1 << 22);       /* enab 4-entry host queue cache */
-                if (!IS_SOC(hpriv))
-                        cfg |= (1 << 18);       /* enab early completion */
+                if (want_edma) {
+                        cfg |= (1 << 22);       /* enab 4-entry host queue cache */
+                        if (!IS_SOC(hpriv))
+                                cfg |= (1 << 18);       /* enab early completion */
+                }
                 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
                         cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
         }
@@ -1221,8 +1232,7 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
         if (want_ncq) {
                 cfg |= EDMA_CFG_NCQ;
                 pp->pp_flags |= MV_PP_FLAG_NCQ_EN;
-        } else
-                pp->pp_flags &= ~MV_PP_FLAG_NCQ_EN;
+        }
 
         writelfl(cfg, port_mmio + EDMA_CFG_OFS);
 }
@@ -1591,7 +1601,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
                 return ata_sff_qc_issue(qc);
         }
 
-        mv_start_dma(ap, port_mmio, pp, qc->tf.protocol);
+        mv_start_edma(ap, port_mmio, pp, qc->tf.protocol);
 
         pp->req_idx = (pp->req_idx + 1) & MV_MAX_Q_DEPTH_MASK;
         in_index = pp->req_idx << EDMA_REQ_Q_PTR_SHIFT;