diff options
author | Mark Lord <liml@rtr.ca> | 2008-03-31 19:33:56 -0400 |
---|---|---|
committer | Jeff Garzik <jgarzik@redhat.com> | 2008-04-17 15:44:21 -0400 |
commit | e12bef50b7660cf7c19d1cd3eac381b9eff734d7 (patch) | |
tree | 58bb601d948f6a1235fd6f402ef7ee62bc1695a1 /drivers/ata/sata_mv.c | |
parent | 83c063dd730cb56bf3fc89b70250ff9a398fec1e (diff) |
sata_mv cosmetic fixes
Various cosmetic fixes in preparation for real code changes later on.
Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r-- | drivers/ata/sata_mv.c | 116 |
1 file changed, 59 insertions, 57 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c index 9a89390531b1..ee6ca97c4545 100644 --- a/drivers/ata/sata_mv.c +++ b/drivers/ata/sata_mv.c | |||
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * sata_mv.c - Marvell SATA support | 2 | * sata_mv.c - Marvell SATA support |
3 | * | 3 | * |
4 | * Copyright 2008: Marvell Corporation, all rights reserved. | ||
4 | * Copyright 2005: EMC Corporation, all rights reserved. | 5 | * Copyright 2005: EMC Corporation, all rights reserved. |
5 | * Copyright 2005 Red Hat, Inc. All rights reserved. | 6 | * Copyright 2005 Red Hat, Inc. All rights reserved. |
6 | * | 7 | * |
@@ -61,7 +62,6 @@ | |||
61 | 62 | ||
62 | */ | 63 | */ |
63 | 64 | ||
64 | |||
65 | #include <linux/kernel.h> | 65 | #include <linux/kernel.h> |
66 | #include <linux/module.h> | 66 | #include <linux/module.h> |
67 | #include <linux/pci.h> | 67 | #include <linux/pci.h> |
@@ -131,7 +131,7 @@ enum { | |||
131 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ | 131 | MV_FLAG_DUAL_HC = (1 << 30), /* two SATA Host Controllers */ |
132 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ | 132 | MV_FLAG_IRQ_COALESCE = (1 << 29), /* IRQ coalescing capability */ |
133 | /* SoC integrated controllers, no PCI interface */ | 133 | /* SoC integrated controllers, no PCI interface */ |
134 | MV_FLAG_SOC = (1 << 28), | 134 | MV_FLAG_SOC = (1 << 28), |
135 | 135 | ||
136 | MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | | 136 | MV_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | |
137 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | | 137 | ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI | |
@@ -141,6 +141,7 @@ enum { | |||
141 | CRQB_FLAG_READ = (1 << 0), | 141 | CRQB_FLAG_READ = (1 << 0), |
142 | CRQB_TAG_SHIFT = 1, | 142 | CRQB_TAG_SHIFT = 1, |
143 | CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ | 143 | CRQB_IOID_SHIFT = 6, /* CRQB Gen-II/IIE IO Id shift */ |
144 | CRQB_PMP_SHIFT = 12, /* CRQB Gen-II/IIE PMP shift */ | ||
144 | CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ | 145 | CRQB_HOSTQ_SHIFT = 17, /* CRQB Gen-II/IIE HostQueTag shift */ |
145 | CRQB_CMD_ADDR_SHIFT = 8, | 146 | CRQB_CMD_ADDR_SHIFT = 8, |
146 | CRQB_CMD_CS = (0x2 << 11), | 147 | CRQB_CMD_CS = (0x2 << 11), |
@@ -199,7 +200,7 @@ enum { | |||
199 | TWSI_INT = (1 << 24), | 200 | TWSI_INT = (1 << 24), |
200 | HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ | 201 | HC_MAIN_RSVD = (0x7f << 25), /* bits 31-25 */ |
201 | HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ | 202 | HC_MAIN_RSVD_5 = (0x1fff << 19), /* bits 31-19 */ |
202 | HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ | 203 | HC_MAIN_RSVD_SOC = (0x3fffffb << 6), /* bits 31-9, 7-6 */ |
203 | HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE | | 204 | HC_MAIN_MASKED_IRQS = (TRAN_LO_DONE | TRAN_HI_DONE | |
204 | PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT | | 205 | PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT | |
205 | HC_MAIN_RSVD), | 206 | HC_MAIN_RSVD), |
@@ -223,13 +224,18 @@ enum { | |||
223 | SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ | 224 | SATA_STATUS_OFS = 0x300, /* ctrl, err regs follow status */ |
224 | SATA_ACTIVE_OFS = 0x350, | 225 | SATA_ACTIVE_OFS = 0x350, |
225 | SATA_FIS_IRQ_CAUSE_OFS = 0x364, | 226 | SATA_FIS_IRQ_CAUSE_OFS = 0x364, |
227 | LTMODE_OFS = 0x30c, | ||
226 | PHY_MODE3 = 0x310, | 228 | PHY_MODE3 = 0x310, |
227 | PHY_MODE4 = 0x314, | 229 | PHY_MODE4 = 0x314, |
228 | PHY_MODE2 = 0x330, | 230 | PHY_MODE2 = 0x330, |
231 | SATA_IFCTL_OFS = 0x344, | ||
232 | SATA_IFSTAT_OFS = 0x34c, | ||
233 | VENDOR_UNIQUE_FIS_OFS = 0x35c, | ||
234 | FIS_CFG_OFS = 0x360, | ||
229 | MV5_PHY_MODE = 0x74, | 235 | MV5_PHY_MODE = 0x74, |
230 | MV5_LT_MODE = 0x30, | 236 | MV5_LT_MODE = 0x30, |
231 | MV5_PHY_CTL = 0x0C, | 237 | MV5_PHY_CTL = 0x0C, |
232 | SATA_INTERFACE_CTL = 0x050, | 238 | SATA_INTERFACE_CFG = 0x050, |
233 | 239 | ||
234 | MV_M2_PREAMP_MASK = 0x7e0, | 240 | MV_M2_PREAMP_MASK = 0x7e0, |
235 | 241 | ||
@@ -240,6 +246,8 @@ enum { | |||
240 | EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ | 246 | EDMA_CFG_NCQ_GO_ON_ERR = (1 << 14), /* continue on error */ |
241 | EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ | 247 | EDMA_CFG_RD_BRST_EXT = (1 << 11), /* read burst 512B */ |
242 | EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ | 248 | EDMA_CFG_WR_BUFF_LEN = (1 << 13), /* write buffer 512B */ |
249 | EDMA_CFG_EDMA_FBS = (1 << 16), /* EDMA FIS-Based Switching */ | ||
250 | EDMA_CFG_FBS = (1 << 26), /* FIS-Based Switching */ | ||
243 | 251 | ||
244 | EDMA_ERR_IRQ_CAUSE_OFS = 0x8, | 252 | EDMA_ERR_IRQ_CAUSE_OFS = 0x8, |
245 | EDMA_ERR_IRQ_MASK_OFS = 0xc, | 253 | EDMA_ERR_IRQ_MASK_OFS = 0xc, |
@@ -298,6 +306,7 @@ enum { | |||
298 | EDMA_ERR_LNK_DATA_RX | | 306 | EDMA_ERR_LNK_DATA_RX | |
299 | EDMA_ERR_LNK_DATA_TX | | 307 | EDMA_ERR_LNK_DATA_TX | |
300 | EDMA_ERR_TRANS_PROTO, | 308 | EDMA_ERR_TRANS_PROTO, |
309 | |||
301 | EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | | 310 | EDMA_EH_FREEZE_5 = EDMA_ERR_D_PAR | |
302 | EDMA_ERR_PRD_PAR | | 311 | EDMA_ERR_PRD_PAR | |
303 | EDMA_ERR_DEV_DCON | | 312 | EDMA_ERR_DEV_DCON | |
@@ -344,7 +353,6 @@ enum { | |||
344 | /* Port private flags (pp_flags) */ | 353 | /* Port private flags (pp_flags) */ |
345 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ | 354 | MV_PP_FLAG_EDMA_EN = (1 << 0), /* is EDMA engine enabled? */ |
346 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ | 355 | MV_PP_FLAG_NCQ_EN = (1 << 1), /* is EDMA set up for NCQ? */ |
347 | MV_PP_FLAG_HAD_A_RESET = (1 << 2), /* 1st hard reset complete? */ | ||
348 | }; | 356 | }; |
349 | 357 | ||
350 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) | 358 | #define IS_GEN_I(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_I) |
@@ -506,11 +514,11 @@ static void mv_soc_reset_flash(struct mv_host_priv *hpriv, | |||
506 | void __iomem *mmio); | 514 | void __iomem *mmio); |
507 | static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); | 515 | static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio); |
508 | static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); | 516 | static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio); |
509 | static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | 517 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, |
510 | unsigned int port_no); | 518 | unsigned int port_no); |
511 | static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, | 519 | static int mv_stop_edma(struct ata_port *ap); |
512 | void __iomem *port_mmio, int want_ncq); | 520 | static int mv_stop_edma_engine(struct ata_port *ap); |
513 | static int __mv_stop_dma(struct ata_port *ap); | 521 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq); |
514 | 522 | ||
515 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below | 523 | /* .sg_tablesize is (MV_MAX_SG_CT / 2) in the structures below |
516 | * because we have to allow room for worst case splitting of | 524 | * because we have to allow room for worst case splitting of |
@@ -714,6 +722,14 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port) | |||
714 | (mv_hardport_from_port(port) * MV_PORT_REG_SZ); | 722 | (mv_hardport_from_port(port) * MV_PORT_REG_SZ); |
715 | } | 723 | } |
716 | 724 | ||
725 | static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) | ||
726 | { | ||
727 | void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); | ||
728 | unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; | ||
729 | |||
730 | return hc_mmio + ofs; | ||
731 | } | ||
732 | |||
717 | static inline void __iomem *mv_host_base(struct ata_host *host) | 733 | static inline void __iomem *mv_host_base(struct ata_host *host) |
718 | { | 734 | { |
719 | struct mv_host_priv *hpriv = host->private_data; | 735 | struct mv_host_priv *hpriv = host->private_data; |
@@ -789,7 +805,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, | |||
789 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { | 805 | if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) { |
790 | int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); | 806 | int using_ncq = ((pp->pp_flags & MV_PP_FLAG_NCQ_EN) != 0); |
791 | if (want_ncq != using_ncq) | 807 | if (want_ncq != using_ncq) |
792 | __mv_stop_dma(ap); | 808 | mv_stop_edma_engine(ap); |
793 | } | 809 | } |
794 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { | 810 | if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) { |
795 | struct mv_host_priv *hpriv = ap->host->private_data; | 811 | struct mv_host_priv *hpriv = ap->host->private_data; |
@@ -810,7 +826,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, | |||
810 | hc_mmio + HC_IRQ_CAUSE_OFS); | 826 | hc_mmio + HC_IRQ_CAUSE_OFS); |
811 | } | 827 | } |
812 | 828 | ||
813 | mv_edma_cfg(pp, hpriv, port_mmio, want_ncq); | 829 | mv_edma_cfg(ap, want_ncq); |
814 | 830 | ||
815 | /* clear FIS IRQ Cause */ | 831 | /* clear FIS IRQ Cause */ |
816 | writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); | 832 | writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS); |
@@ -824,7 +840,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, | |||
824 | } | 840 | } |
825 | 841 | ||
826 | /** | 842 | /** |
827 | * __mv_stop_dma - Disable eDMA engine | 843 | * mv_stop_edma_engine - Disable eDMA engine |
828 | * @ap: ATA channel to manipulate | 844 | * @ap: ATA channel to manipulate |
829 | * | 845 | * |
830 | * Verify the local cache of the eDMA state is accurate with a | 846 | * Verify the local cache of the eDMA state is accurate with a |
@@ -833,7 +849,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio, | |||
833 | * LOCKING: | 849 | * LOCKING: |
834 | * Inherited from caller. | 850 | * Inherited from caller. |
835 | */ | 851 | */ |
836 | static int __mv_stop_dma(struct ata_port *ap) | 852 | static int mv_stop_edma_engine(struct ata_port *ap) |
837 | { | 853 | { |
838 | void __iomem *port_mmio = mv_ap_base(ap); | 854 | void __iomem *port_mmio = mv_ap_base(ap); |
839 | struct mv_port_priv *pp = ap->private_data; | 855 | struct mv_port_priv *pp = ap->private_data; |
@@ -866,13 +882,13 @@ static int __mv_stop_dma(struct ata_port *ap) | |||
866 | return err; | 882 | return err; |
867 | } | 883 | } |
868 | 884 | ||
869 | static int mv_stop_dma(struct ata_port *ap) | 885 | static int mv_stop_edma(struct ata_port *ap) |
870 | { | 886 | { |
871 | unsigned long flags; | 887 | unsigned long flags; |
872 | int rc; | 888 | int rc; |
873 | 889 | ||
874 | spin_lock_irqsave(&ap->host->lock, flags); | 890 | spin_lock_irqsave(&ap->host->lock, flags); |
875 | rc = __mv_stop_dma(ap); | 891 | rc = mv_stop_edma_engine(ap); |
876 | spin_unlock_irqrestore(&ap->host->lock, flags); | 892 | spin_unlock_irqrestore(&ap->host->lock, flags); |
877 | 893 | ||
878 | return rc; | 894 | return rc; |
@@ -1007,10 +1023,12 @@ static void mv6_dev_config(struct ata_device *adev) | |||
1007 | adev->max_sectors = ATA_MAX_SECTORS; | 1023 | adev->max_sectors = ATA_MAX_SECTORS; |
1008 | } | 1024 | } |
1009 | 1025 | ||
1010 | static void mv_edma_cfg(struct mv_port_priv *pp, struct mv_host_priv *hpriv, | 1026 | static void mv_edma_cfg(struct ata_port *ap, int want_ncq) |
1011 | void __iomem *port_mmio, int want_ncq) | ||
1012 | { | 1027 | { |
1013 | u32 cfg; | 1028 | u32 cfg; |
1029 | struct mv_port_priv *pp = ap->private_data; | ||
1030 | struct mv_host_priv *hpriv = ap->host->private_data; | ||
1031 | void __iomem *port_mmio = mv_ap_base(ap); | ||
1014 | 1032 | ||
1015 | /* set up non-NCQ EDMA configuration */ | 1033 | /* set up non-NCQ EDMA configuration */ |
1016 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ | 1034 | cfg = EDMA_CFG_Q_DEPTH; /* always 0x1f for *all* chips */ |
@@ -1118,7 +1136,7 @@ static int mv_port_start(struct ata_port *ap) | |||
1118 | 1136 | ||
1119 | spin_lock_irqsave(&ap->host->lock, flags); | 1137 | spin_lock_irqsave(&ap->host->lock, flags); |
1120 | 1138 | ||
1121 | mv_edma_cfg(pp, hpriv, port_mmio, 0); | 1139 | mv_edma_cfg(ap, 0); |
1122 | mv_set_edma_ptrs(port_mmio, hpriv, pp); | 1140 | mv_set_edma_ptrs(port_mmio, hpriv, pp); |
1123 | 1141 | ||
1124 | spin_unlock_irqrestore(&ap->host->lock, flags); | 1142 | spin_unlock_irqrestore(&ap->host->lock, flags); |
@@ -1145,7 +1163,7 @@ out_port_free_dma_mem: | |||
1145 | */ | 1163 | */ |
1146 | static void mv_port_stop(struct ata_port *ap) | 1164 | static void mv_port_stop(struct ata_port *ap) |
1147 | { | 1165 | { |
1148 | mv_stop_dma(ap); | 1166 | mv_stop_edma(ap); |
1149 | mv_port_free_dma_mem(ap); | 1167 | mv_port_free_dma_mem(ap); |
1150 | } | 1168 | } |
1151 | 1169 | ||
@@ -1315,8 +1333,7 @@ static void mv_qc_prep_iie(struct ata_queued_cmd *qc) | |||
1315 | (qc->tf.protocol != ATA_PROT_NCQ)) | 1333 | (qc->tf.protocol != ATA_PROT_NCQ)) |
1316 | return; | 1334 | return; |
1317 | 1335 | ||
1318 | /* Fill in Gen IIE command request block | 1336 | /* Fill in Gen IIE command request block */ |
1319 | */ | ||
1320 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) | 1337 | if (!(qc->tf.flags & ATA_TFLAG_WRITE)) |
1321 | flags |= CRQB_FLAG_READ; | 1338 | flags |= CRQB_FLAG_READ; |
1322 | 1339 | ||
@@ -1384,7 +1401,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
1384 | * port. Turn off EDMA so there won't be problems accessing | 1401 | * port. Turn off EDMA so there won't be problems accessing |
1385 | * shadow block, etc registers. | 1402 | * shadow block, etc registers. |
1386 | */ | 1403 | */ |
1387 | __mv_stop_dma(ap); | 1404 | mv_stop_edma_engine(ap); |
1388 | return ata_qc_issue_prot(qc); | 1405 | return ata_qc_issue_prot(qc); |
1389 | } | 1406 | } |
1390 | 1407 | ||
@@ -1407,10 +1424,10 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc) | |||
1407 | * @reset_allowed: bool: 0 == don't trigger from reset here | 1424 | * @reset_allowed: bool: 0 == don't trigger from reset here |
1408 | * | 1425 | * |
1409 | * In most cases, just clear the interrupt and move on. However, | 1426 | * In most cases, just clear the interrupt and move on. However, |
1410 | * some cases require an eDMA reset, which is done right before | 1427 | * some cases require an eDMA reset, which also performs a COMRESET. |
1411 | * the COMRESET in mv_phy_reset(). The SERR case requires a | 1428 | * The SERR case requires a clear of pending errors in the SATA |
1412 | * clear of pending errors in the SATA SERROR register. Finally, | 1429 | * SERROR register. Finally, if the port disabled DMA, |
1413 | * if the port disabled DMA, update our cached copy to match. | 1430 | * update our cached copy to match. |
1414 | * | 1431 | * |
1415 | * LOCKING: | 1432 | * LOCKING: |
1416 | * Inherited from caller. | 1433 | * Inherited from caller. |
@@ -1648,9 +1665,9 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc) | |||
1648 | pp = ap->private_data; | 1665 | pp = ap->private_data; |
1649 | 1666 | ||
1650 | shift = port << 1; /* (port * 2) */ | 1667 | shift = port << 1; /* (port * 2) */ |
1651 | if (port >= MV_PORTS_PER_HC) { | 1668 | if (port >= MV_PORTS_PER_HC) |
1652 | shift++; /* skip bit 8 in the HC Main IRQ reg */ | 1669 | shift++; /* skip bit 8 in the HC Main IRQ reg */ |
1653 | } | 1670 | |
1654 | have_err_bits = ((PORT0_ERR << shift) & relevant); | 1671 | have_err_bits = ((PORT0_ERR << shift) & relevant); |
1655 | 1672 | ||
1656 | if (unlikely(have_err_bits)) { | 1673 | if (unlikely(have_err_bits)) { |
@@ -1739,6 +1756,7 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance) | |||
1739 | void __iomem *mmio = hpriv->base; | 1756 | void __iomem *mmio = hpriv->base; |
1740 | u32 irq_stat, irq_mask; | 1757 | u32 irq_stat, irq_mask; |
1741 | 1758 | ||
1759 | /* Note to self: &host->lock == &ap->host->lock == ap->lock */ | ||
1742 | spin_lock(&host->lock); | 1760 | spin_lock(&host->lock); |
1743 | 1761 | ||
1744 | irq_stat = readl(hpriv->main_cause_reg_addr); | 1762 | irq_stat = readl(hpriv->main_cause_reg_addr); |
@@ -1772,14 +1790,6 @@ out_unlock: | |||
1772 | return IRQ_RETVAL(handled); | 1790 | return IRQ_RETVAL(handled); |
1773 | } | 1791 | } |
1774 | 1792 | ||
1775 | static void __iomem *mv5_phy_base(void __iomem *mmio, unsigned int port) | ||
1776 | { | ||
1777 | void __iomem *hc_mmio = mv_hc_base_from_port(mmio, port); | ||
1778 | unsigned long ofs = (mv_hardport_from_port(port) + 1) * 0x100UL; | ||
1779 | |||
1780 | return hc_mmio + ofs; | ||
1781 | } | ||
1782 | |||
1783 | static unsigned int mv5_scr_offset(unsigned int sc_reg_in) | 1793 | static unsigned int mv5_scr_offset(unsigned int sc_reg_in) |
1784 | { | 1794 | { |
1785 | unsigned int ofs; | 1795 | unsigned int ofs; |
@@ -1907,7 +1917,7 @@ static void mv5_reset_hc_port(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
1907 | 1917 | ||
1908 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); | 1918 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); |
1909 | 1919 | ||
1910 | mv_channel_reset(hpriv, mmio, port); | 1920 | mv_reset_channel(hpriv, mmio, port); |
1911 | 1921 | ||
1912 | ZERO(0x028); /* command */ | 1922 | ZERO(0x028); /* command */ |
1913 | writel(0x11f, port_mmio + EDMA_CFG_OFS); | 1923 | writel(0x11f, port_mmio + EDMA_CFG_OFS); |
@@ -2125,14 +2135,15 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
2125 | m4 = readl(port_mmio + PHY_MODE4); | 2135 | m4 = readl(port_mmio + PHY_MODE4); |
2126 | 2136 | ||
2127 | if (hp_flags & MV_HP_ERRATA_60X1B2) | 2137 | if (hp_flags & MV_HP_ERRATA_60X1B2) |
2128 | tmp = readl(port_mmio + 0x310); | 2138 | tmp = readl(port_mmio + PHY_MODE3); |
2129 | 2139 | ||
2140 | /* workaround for errata FEr SATA#10 (part 1) */ | ||
2130 | m4 = (m4 & ~(1 << 1)) | (1 << 0); | 2141 | m4 = (m4 & ~(1 << 1)) | (1 << 0); |
2131 | 2142 | ||
2132 | writel(m4, port_mmio + PHY_MODE4); | 2143 | writel(m4, port_mmio + PHY_MODE4); |
2133 | 2144 | ||
2134 | if (hp_flags & MV_HP_ERRATA_60X1B2) | 2145 | if (hp_flags & MV_HP_ERRATA_60X1B2) |
2135 | writel(tmp, port_mmio + 0x310); | 2146 | writel(tmp, port_mmio + PHY_MODE3); |
2136 | } | 2147 | } |
2137 | 2148 | ||
2138 | /* Revert values of pre-emphasis and signal amps to the saved ones */ | 2149 | /* Revert values of pre-emphasis and signal amps to the saved ones */ |
@@ -2182,7 +2193,7 @@ static void mv_soc_reset_hc_port(struct mv_host_priv *hpriv, | |||
2182 | 2193 | ||
2183 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); | 2194 | writelfl(EDMA_DS, port_mmio + EDMA_CMD_OFS); |
2184 | 2195 | ||
2185 | mv_channel_reset(hpriv, mmio, port); | 2196 | mv_reset_channel(hpriv, mmio, port); |
2186 | 2197 | ||
2187 | ZERO(0x028); /* command */ | 2198 | ZERO(0x028); /* command */ |
2188 | writel(0x101f, port_mmio + EDMA_CFG_OFS); | 2199 | writel(0x101f, port_mmio + EDMA_CFG_OFS); |
@@ -2239,7 +2250,7 @@ static void mv_soc_reset_bus(struct ata_host *host, void __iomem *mmio) | |||
2239 | return; | 2250 | return; |
2240 | } | 2251 | } |
2241 | 2252 | ||
2242 | static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | 2253 | static void mv_reset_channel(struct mv_host_priv *hpriv, void __iomem *mmio, |
2243 | unsigned int port_no) | 2254 | unsigned int port_no) |
2244 | { | 2255 | { |
2245 | void __iomem *port_mmio = mv_port_base(mmio, port_no); | 2256 | void __iomem *port_mmio = mv_port_base(mmio, port_no); |
@@ -2247,10 +2258,10 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio, | |||
2247 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); | 2258 | writelfl(ATA_RST, port_mmio + EDMA_CMD_OFS); |
2248 | 2259 | ||
2249 | if (IS_GEN_II(hpriv)) { | 2260 | if (IS_GEN_II(hpriv)) { |
2250 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); | 2261 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG); |
2251 | ifctl |= (1 << 7); /* enable gen2i speed */ | 2262 | ifctl |= (1 << 7); /* enable gen2i speed */ |
2252 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ | 2263 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ |
2253 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); | 2264 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG); |
2254 | } | 2265 | } |
2255 | 2266 | ||
2256 | udelay(25); /* allow reset propagation */ | 2267 | udelay(25); /* allow reset propagation */ |
@@ -2372,14 +2383,7 @@ comreset_retry: | |||
2372 | 2383 | ||
2373 | static int mv_prereset(struct ata_link *link, unsigned long deadline) | 2384 | static int mv_prereset(struct ata_link *link, unsigned long deadline) |
2374 | { | 2385 | { |
2375 | struct ata_port *ap = link->ap; | 2386 | mv_stop_edma(link->ap); |
2376 | struct mv_port_priv *pp = ap->private_data; | ||
2377 | |||
2378 | mv_stop_dma(ap); | ||
2379 | |||
2380 | if (!(pp->pp_flags & MV_PP_FLAG_HAD_A_RESET)) | ||
2381 | pp->pp_flags |= MV_PP_FLAG_HAD_A_RESET; | ||
2382 | |||
2383 | return 0; | 2387 | return 0; |
2384 | } | 2388 | } |
2385 | 2389 | ||
@@ -2390,10 +2394,8 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class, | |||
2390 | struct mv_host_priv *hpriv = ap->host->private_data; | 2394 | struct mv_host_priv *hpriv = ap->host->private_data; |
2391 | void __iomem *mmio = hpriv->base; | 2395 | void __iomem *mmio = hpriv->base; |
2392 | 2396 | ||
2393 | mv_stop_dma(ap); | 2397 | mv_stop_edma(ap); |
2394 | 2398 | mv_reset_channel(hpriv, mmio, ap->port_no); | |
2395 | mv_channel_reset(hpriv, mmio, ap->port_no); | ||
2396 | |||
2397 | mv_phy_reset(ap, class, deadline); | 2399 | mv_phy_reset(ap, class, deadline); |
2398 | 2400 | ||
2399 | return 0; | 2401 | return 0; |
@@ -2715,10 +2717,10 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx) | |||
2715 | if (IS_GEN_II(hpriv)) { | 2717 | if (IS_GEN_II(hpriv)) { |
2716 | void __iomem *port_mmio = mv_port_base(mmio, port); | 2718 | void __iomem *port_mmio = mv_port_base(mmio, port); |
2717 | 2719 | ||
2718 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CTL); | 2720 | u32 ifctl = readl(port_mmio + SATA_INTERFACE_CFG); |
2719 | ifctl |= (1 << 7); /* enable gen2i speed */ | 2721 | ifctl |= (1 << 7); /* enable gen2i speed */ |
2720 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ | 2722 | ifctl = (ifctl & 0xfff) | 0x9b1000; /* from chip spec */ |
2721 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CTL); | 2723 | writelfl(ifctl, port_mmio + SATA_INTERFACE_CFG); |
2722 | } | 2724 | } |
2723 | 2725 | ||
2724 | hpriv->ops->phy_errata(hpriv, mmio, port); | 2726 | hpriv->ops->phy_errata(hpriv, mmio, port); |