Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--	drivers/ata/sata_mv.c	240
1 file changed, 128 insertions, 112 deletions
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index bb73b2222627..acf347f71a2f 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -72,7 +72,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"1.20"
+#define DRV_VERSION	"1.24"
 
 enum {
 	/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -122,14 +122,17 @@ enum {
 	/* Host Flags */
 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
-	/* SoC integrated controllers, no PCI interface */
-	MV_FLAG_SOC		= (1 << 28),
 
 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
 				  ATA_FLAG_PIO_POLLING,
+
 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
 
+	MV_GENIIE_FLAGS		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+				  ATA_FLAG_NCQ | ATA_FLAG_AN,
+
 	CRQB_FLAG_READ		= (1 << 0),
 	CRQB_TAG_SHIFT		= 1,
 	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
@@ -197,13 +200,6 @@ enum {
 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
 	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
-	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
-				   PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
-				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
-				   HC_MAIN_RSVD),
-	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
-				   HC_MAIN_RSVD_5),
-	HC_MAIN_MASKED_IRQS_SOC	= (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
 
 	/* SATAHC registers */
 	HC_CFG_OFS		= 0,
@@ -221,6 +217,7 @@ enum {
 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
 	SATA_ACTIVE_OFS		= 0x350,
 	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
+	SATA_FIS_IRQ_AN		= (1 << 9),	/* async notification */
 
 	LTMODE_OFS		= 0x30c,
 	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
@@ -357,12 +354,12 @@ enum {
 	MV_HP_ERRATA_50XXB2	= (1 << 2),
 	MV_HP_ERRATA_60X1B2	= (1 << 3),
 	MV_HP_ERRATA_60X1C0	= (1 << 4),
-	MV_HP_ERRATA_XX42A0	= (1 << 5),
 	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
 	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
 	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
 	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
 	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
+	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
 
 	/* Port private flags (pp_flags) */
 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
@@ -375,7 +372,7 @@ enum {
 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
-#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
+#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
 
 #define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
 #define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
@@ -459,6 +456,7 @@ struct mv_port_signal {
 
 struct mv_host_priv {
 	u32			hp_flags;
+	u32			main_irq_mask;
 	struct mv_port_signal	signal[8];
 	const struct mv_hw_ops	*ops;
 	int			n_ports;
@@ -640,25 +638,19 @@ static const struct ata_port_info mv_port_info[] = {
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_6042 */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
-				  ATA_FLAG_NCQ,
+		.flags		= MV_GENIIE_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_7042 */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
-				  ATA_FLAG_NCQ,
+		.flags		= MV_GENIIE_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_soc */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
-				  ATA_FLAG_NCQ | MV_FLAG_SOC,
+		.flags		= MV_GENIIE_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
@@ -818,12 +810,7 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
-
-	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
-		writelfl((pp->crqb_dma & 0xffffffff) | index,
-			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
-	else
-		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
 
 	/*
 	 * initialize response queue
@@ -833,17 +820,38 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
 
 	WARN_ON(pp->crpb_dma & 0xff);
 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
-
-	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
-		writelfl((pp->crpb_dma & 0xffffffff) | index,
-			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
-	else
-		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
-
+	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
 	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 }
 
+static void mv_set_main_irq_mask(struct ata_host *host,
+				 u32 disable_bits, u32 enable_bits)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	u32 old_mask, new_mask;
+
+	old_mask = hpriv->main_irq_mask;
+	new_mask = (old_mask & ~disable_bits) | enable_bits;
+	if (new_mask != old_mask) {
+		hpriv->main_irq_mask = new_mask;
+		writelfl(new_mask, hpriv->main_irq_mask_addr);
+	}
+}
+
+static void mv_enable_port_irqs(struct ata_port *ap,
+				unsigned int port_bits)
+{
+	unsigned int shift, hardport, port = ap->port_no;
+	u32 disable_bits, enable_bits;
+
+	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
+
+	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
+	enable_bits  = port_bits << shift;
+	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
+}
+
 /**
  * mv_start_dma - Enable eDMA engine
  * @base: port base address
@@ -886,9 +894,11 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 		mv_edma_cfg(ap, want_ncq);
 
 		/* clear FIS IRQ Cause */
-		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+		if (IS_GEN_IIE(hpriv))
+			writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
 
 		mv_set_edma_ptrs(port_mmio, hpriv, pp);
+		mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);
 
 		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
@@ -1231,7 +1241,7 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
 
 		cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
 		cfg |= (1 << 22);	/* enab 4-entry host queue cache */
-		if (HAS_PCI(ap->host))
+		if (!IS_SOC(hpriv))
 			cfg |= (1 << 18);	/* enab early completion */
 		if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
 			cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
@@ -1341,6 +1351,7 @@ out_port_free_dma_mem:
 static void mv_port_stop(struct ata_port *ap)
 {
 	mv_stop_edma(ap);
+	mv_enable_port_irqs(ap, 0);
 	mv_port_free_dma_mem(ap);
 }
 
@@ -1582,6 +1593,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 	 * shadow block, etc registers.
 	 */
 	mv_stop_edma(ap);
+	mv_enable_port_irqs(ap, ERR_IRQ);
 	mv_pmp_select(ap, qc->dev->link->pmp);
 	return ata_sff_qc_issue(qc);
 }
@@ -1670,6 +1682,18 @@ static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
 	}
 }
 
+static int mv_req_q_empty(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	u32 in_ptr, out_ptr;
+
+	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
+			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
+			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
+}
+
 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
 {
 	struct mv_port_priv *pp = ap->private_data;
@@ -1703,7 +1727,7 @@ static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
 			 ap->qc_active, failed_links,
 			 ap->nr_active_links);
 
-	if (ap->nr_active_links <= failed_links) {
+	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
 		mv_process_crpb_entries(ap, pp);
 		mv_stop_edma(ap);
 		mv_eh_freeze(ap);
@@ -1812,6 +1836,7 @@ static void mv_err_intr(struct ata_port *ap)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
+	u32 fis_cause = 0;
 	struct mv_port_priv *pp = ap->private_data;
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	unsigned int action = 0, err_mask = 0;
@@ -1821,16 +1846,19 @@ static void mv_err_intr(struct ata_port *ap)
 
 	/*
 	 * Read and clear the SError and err_cause bits.
+	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
+	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
 	 */
 	sata_scr_read(&ap->link, SCR_ERROR, &serr);
 	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
 
 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
+		fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+		writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+	}
 	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-	ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n",
-			__func__, edma_err_cause, pp->pp_flags);
-
 	if (edma_err_cause & EDMA_ERR_DEV) {
 		/*
 		 * Device errors during FIS-based switching operation
@@ -1844,6 +1872,18 @@ static void mv_err_intr(struct ata_port *ap)
 	ata_ehi_clear_desc(ehi);
 	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
 			  edma_err_cause, pp->pp_flags);
+
+	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
+		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
+		if (fis_cause & SATA_FIS_IRQ_AN) {
+			u32 ec = edma_err_cause &
+			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
+			sata_async_notification(ap);
+			if (!ec)
+				return; /* Just an AN; no need for the nukes */
+			ata_ehi_push_desc(ehi, "SDB notify");
+		}
+	}
 	/*
 	 * All generations share these EDMA error cause bits:
 	 */
@@ -2162,20 +2202,20 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 	struct ata_host *host = dev_instance;
 	struct mv_host_priv *hpriv = host->private_data;
 	unsigned int handled = 0;
-	u32 main_irq_cause, main_irq_mask;
+	u32 main_irq_cause, pending_irqs;
 
 	spin_lock(&host->lock);
 	main_irq_cause = readl(hpriv->main_irq_cause_addr);
-	main_irq_mask = readl(hpriv->main_irq_mask_addr);
+	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
 	/*
 	 * Deal with cases where we either have nothing pending, or have read
 	 * a bogus register value which can indicate HW removal or PCI fault.
 	 */
-	if ((main_irq_cause & main_irq_mask) && (main_irq_cause != 0xffffffffU)) {
-		if (unlikely((main_irq_cause & PCI_ERR) && HAS_PCI(host)))
+	if (pending_irqs && main_irq_cause != 0xffffffffU) {
+		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
 			handled = mv_pci_error(host, hpriv->base);
 		else
-			handled = mv_host_intr(host, main_irq_cause);
+			handled = mv_host_intr(host, pending_irqs);
 	}
 	spin_unlock(&host->lock);
 	return IRQ_RETVAL(handled);
@@ -2373,7 +2413,6 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
 	ZERO(MV_PCI_DISC_TIMER);
 	ZERO(MV_PCI_MSI_TRIGGER);
 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
-	ZERO(PCI_HC_MAIN_IRQ_MASK_OFS);
 	ZERO(MV_PCI_SERR_MASK);
 	ZERO(hpriv->irq_cause_ofs);
 	ZERO(hpriv->irq_mask_ofs);
@@ -2495,7 +2534,7 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 			   hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
 	int fix_phy_mode4 =
 			   hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
-	u32 m2, tmp;
+	u32 m2, m3;
 
 	if (fix_phy_mode2) {
 		m2 = readl(port_mmio + PHY_MODE2);
@@ -2512,28 +2551,37 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 		udelay(200);
 	}
 
-	/* who knows what this magic does */
-	tmp = readl(port_mmio + PHY_MODE3);
-	tmp &= ~0x7F800000;
-	tmp |= 0x2A800000;
-	writel(tmp, port_mmio + PHY_MODE3);
+	/*
+	 * Gen-II/IIe PHY_MODE3 errata RM#2:
+	 * Achieves better receiver noise performance than the h/w default:
+	 */
+	m3 = readl(port_mmio + PHY_MODE3);
+	m3 = (m3 & 0x1f) | (0x5555601 << 5);
+
+	/* Guideline 88F5182 (GL# SATA-S11) */
+	if (IS_SOC(hpriv))
+		m3 &= ~0x1c;
 
 	if (fix_phy_mode4) {
 		u32 m4;
 
 		m4 = readl(port_mmio + PHY_MODE4);
 
-		if (hp_flags & MV_HP_ERRATA_60X1B2)
-			tmp = readl(port_mmio + PHY_MODE3);
-
 		/* workaround for errata FEr SATA#10 (part 1) */
 		m4 = (m4 & ~(1 << 1)) | (1 << 0);
 
-		writel(m4, port_mmio + PHY_MODE4);
+		/* enforce bit restrictions on GenIIe devices */
+		if (IS_GEN_IIE(hpriv))
+			m4 = (m4 & ~0x5DE3FFFC) | (1 << 2);
 
-		if (hp_flags & MV_HP_ERRATA_60X1B2)
-			writel(tmp, port_mmio + PHY_MODE3);
+		writel(m4, port_mmio + PHY_MODE4);
 	}
+	/*
+	 * Workaround for 60x1-B2 errata SATA#13:
+	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
+	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
+	 */
+	writel(m3, port_mmio + PHY_MODE3);
 
 	/* Revert values of pre-emphasis and signal amps to the saved ones */
 	m2 = readl(port_mmio + PHY_MODE2);
@@ -2728,6 +2776,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
 
 	rc = sata_link_hardreset(link, timing, deadline + extra,
 				 &online, NULL);
+	rc = online ? -EAGAIN : rc;
 	if (rc)
 		return rc;
 	sata_scr_read(link, SCR_STATUS, &sstatus);
@@ -2744,32 +2793,18 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
 
 static void mv_eh_freeze(struct ata_port *ap)
 {
-	struct mv_host_priv *hpriv = ap->host->private_data;
-	unsigned int shift, hardport, port = ap->port_no;
-	u32 main_irq_mask;
-
-	/* FIXME: handle coalescing completion events properly */
-
 	mv_stop_edma(ap);
-	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
-
-	/* disable assertion of portN err, done events */
-	main_irq_mask = readl(hpriv->main_irq_mask_addr);
-	main_irq_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
-	writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
+	mv_enable_port_irqs(ap, 0);
 }
 
 static void mv_eh_thaw(struct ata_port *ap)
 {
 	struct mv_host_priv *hpriv = ap->host->private_data;
-	unsigned int shift, hardport, port = ap->port_no;
+	unsigned int port = ap->port_no;
+	unsigned int hardport = mv_hardport_from_port(port);
 	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
 	void __iomem *port_mmio = mv_ap_base(ap);
-	u32 main_irq_mask, hc_irq_cause;
-
-	/* FIXME: handle coalescing completion events properly */
-
-	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
+	u32 hc_irq_cause;
 
 	/* clear EDMA errors on this port */
 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -2779,10 +2814,7 @@ static void mv_eh_thaw(struct ata_port *ap)
 	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
 	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 
-	/* enable assertion of portN err, done events */
-	main_irq_mask = readl(hpriv->main_irq_mask_addr);
-	main_irq_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
-	writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
+	mv_enable_port_irqs(ap, ERR_IRQ);
 }
 
 /**
@@ -2840,7 +2872,7 @@ static unsigned int mv_in_pcix_mode(struct ata_host *host)
 	void __iomem *mmio = hpriv->base;
 	u32 reg;
 
-	if (!HAS_PCI(host) || !IS_PCIE(hpriv))
+	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
 		return 0;	/* not PCI-X capable */
 	reg = readl(mmio + MV_PCI_MODE_OFS);
 	if ((reg & MV_PCI_MODE_MASK) == 0)
@@ -2967,10 +2999,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 			hp_flags |= MV_HP_CUT_THROUGH;
 
 		switch (pdev->revision) {
-		case 0x0:
-			hp_flags |= MV_HP_ERRATA_XX42A0;
-			break;
-		case 0x1:
+		case 0x2: /* Rev.B0: the first/only public release */
 			hp_flags |= MV_HP_ERRATA_60X1C0;
 			break;
 		default:
@@ -2982,7 +3011,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 		break;
 	case chip_soc:
 		hpriv->ops = &mv_soc_ops;
-		hp_flags |= MV_HP_ERRATA_60X1C0;
+		hp_flags |= MV_HP_FLAG_SOC | MV_HP_ERRATA_60X1C0;
 		break;
 
 	default:
@@ -3026,16 +3055,16 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 	if (rc)
 		goto done;
 
-	if (HAS_PCI(host)) {
-		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
-		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
-	} else {
+	if (IS_SOC(hpriv)) {
 		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
 		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
+	} else {
+		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
+		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
 	}
 
 	/* global interrupt mask: 0 == mask everything */
-	writel(0, hpriv->main_irq_mask_addr);
+	mv_set_main_irq_mask(host, ~0, 0);
 
 	n_hc = mv_get_hc_count(host->ports[0]->flags);
 
@@ -3057,7 +3086,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 		mv_port_init(&ap->ioaddr, port_mmio);
 
 #ifdef CONFIG_PCI
-		if (HAS_PCI(host)) {
+		if (!IS_SOC(hpriv)) {
 			unsigned int offset = port_mmio - mmio;
 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
@@ -3077,31 +3106,18 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
 	}
 
-	if (HAS_PCI(host)) {
+	if (!IS_SOC(hpriv)) {
 		/* Clear any currently outstanding host interrupt conditions */
 		writelfl(0, mmio + hpriv->irq_cause_ofs);
 
 		/* and unmask interrupt generation for host regs */
 		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
-		if (IS_GEN_I(hpriv))
-			writelfl(~HC_MAIN_MASKED_IRQS_5,
-				 hpriv->main_irq_mask_addr);
-		else
-			writelfl(~HC_MAIN_MASKED_IRQS,
-				 hpriv->main_irq_mask_addr);
-
-		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
-			"PCI int cause/mask=0x%08x/0x%08x\n",
-			readl(hpriv->main_irq_cause_addr),
-			readl(hpriv->main_irq_mask_addr),
-			readl(mmio + hpriv->irq_cause_ofs),
-			readl(mmio + hpriv->irq_mask_ofs));
-	} else {
-		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
-			 hpriv->main_irq_mask_addr);
-		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
-			readl(hpriv->main_irq_cause_addr),
-			readl(hpriv->main_irq_mask_addr));
+
+		/*
+		 * enable only global host interrupts for now.
+		 * The per-port interrupts get done later as ports are set up.
+		 */
+		mv_set_main_irq_mask(host, 0, PCI_ERR);
 	}
 done:
 	return rc;