Diffstat (limited to 'drivers/ata/sata_mv.c')
 -rw-r--r--  drivers/ata/sata_mv.c  |  258
 1 file changed, 139 insertions(+), 119 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index bb73b2222627..60391e9a84db 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -72,7 +72,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME	"sata_mv"
-#define DRV_VERSION	"1.20"
+#define DRV_VERSION	"1.24"
 
 enum {
 	/* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -122,14 +122,17 @@ enum {
 	/* Host Flags */
 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
 	MV_FLAG_IRQ_COALESCE	= (1 << 29),  /* IRQ coalescing capability */
-	/* SoC integrated controllers, no PCI interface */
-	MV_FLAG_SOC		= (1 << 28),
 
 	MV_COMMON_FLAGS		= ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
 				  ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
 				  ATA_FLAG_PIO_POLLING,
+
 	MV_6XXX_FLAGS		= MV_FLAG_IRQ_COALESCE,
 
+	MV_GENIIE_FLAGS		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+				  ATA_FLAG_NCQ | ATA_FLAG_AN,
+
 	CRQB_FLAG_READ		= (1 << 0),
 	CRQB_TAG_SHIFT		= 1,
 	CRQB_IOID_SHIFT		= 6,	/* CRQB Gen-II/IIE IO Id shift */
@@ -197,13 +200,6 @@ enum {
 	HC_MAIN_RSVD		= (0x7f << 25),	/* bits 31-25 */
 	HC_MAIN_RSVD_5		= (0x1fff << 19), /* bits 31-19 */
 	HC_MAIN_RSVD_SOC	= (0x3fffffb << 6),     /* bits 31-9, 7-6 */
-	HC_MAIN_MASKED_IRQS	= (TRAN_LO_DONE | TRAN_HI_DONE |
-				   PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
-				   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
-				   HC_MAIN_RSVD),
-	HC_MAIN_MASKED_IRQS_5	= (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
-				   HC_MAIN_RSVD_5),
-	HC_MAIN_MASKED_IRQS_SOC	= (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
 
 	/* SATAHC registers */
 	HC_CFG_OFS		= 0,
@@ -221,12 +217,18 @@ enum {
 	SATA_STATUS_OFS		= 0x300,  /* ctrl, err regs follow status */
 	SATA_ACTIVE_OFS		= 0x350,
 	SATA_FIS_IRQ_CAUSE_OFS	= 0x364,
+	  SATA_FIS_IRQ_AN	= (1 << 9),	/* async notification */
 
 	LTMODE_OFS		= 0x30c,
 	LTMODE_BIT8		= (1 << 8),	/* unknown, but necessary */
 
 	PHY_MODE3		= 0x310,
 	PHY_MODE4		= 0x314,
+	PHY_MODE4_CFG_MASK	= 0x00000003,	/* phy internal config field */
+	PHY_MODE4_CFG_VALUE	= 0x00000001,	/* phy internal config field */
+	PHY_MODE4_RSVD_ZEROS	= 0x5de3fffa,	/* Gen2e always write zeros */
+	PHY_MODE4_RSVD_ONES	= 0x00000005,	/* Gen2e always write ones */
+
 	PHY_MODE2		= 0x330,
 	SATA_IFCTL_OFS		= 0x344,
 	SATA_TESTCTL_OFS	= 0x348,
@@ -357,12 +359,12 @@ enum {
 	MV_HP_ERRATA_50XXB2	= (1 << 2),
 	MV_HP_ERRATA_60X1B2	= (1 << 3),
 	MV_HP_ERRATA_60X1C0	= (1 << 4),
-	MV_HP_ERRATA_XX42A0	= (1 << 5),
 	MV_HP_GEN_I		= (1 << 6),	/* Generation I: 50xx */
 	MV_HP_GEN_II		= (1 << 7),	/* Generation II: 60xx */
 	MV_HP_GEN_IIE		= (1 << 8),	/* Generation IIE: 6042/7042 */
 	MV_HP_PCIE		= (1 << 9),	/* PCIe bus/regs: 7042 */
 	MV_HP_CUT_THROUGH	= (1 << 10),	/* can use EDMA cut-through */
+	MV_HP_FLAG_SOC		= (1 << 11),	/* SystemOnChip, no PCI */
 
 	/* Port private flags (pp_flags) */
 	MV_PP_FLAG_EDMA_EN	= (1 << 0),	/* is EDMA engine enabled? */
@@ -375,7 +377,7 @@ enum {
 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
-#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
+#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
 
 #define WINDOW_CTRL(i)		(0x20030 + ((i) << 4))
 #define WINDOW_BASE(i)		(0x20034 + ((i) << 4))
@@ -459,6 +461,7 @@ struct mv_port_signal {
 
 struct mv_host_priv {
 	u32			hp_flags;
+	u32			main_irq_mask;
 	struct mv_port_signal	signal[8];
 	const struct mv_hw_ops	*ops;
 	int			n_ports;
@@ -640,25 +643,19 @@ static const struct ata_port_info mv_port_info[] = {
 		.port_ops	= &mv6_ops,
 	},
 	{  /* chip_6042 */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
-				  ATA_FLAG_NCQ,
+		.flags		= MV_GENIIE_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_7042 */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
-				  ATA_FLAG_NCQ,
+		.flags		= MV_GENIIE_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
 	},
 	{  /* chip_soc */
-		.flags		= MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-				  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
-				  ATA_FLAG_NCQ | MV_FLAG_SOC,
+		.flags		= MV_GENIIE_FLAGS,
 		.pio_mask	= 0x1f,	/* pio0-4 */
 		.udma_mask	= ATA_UDMA6,
 		.port_ops	= &mv_iie_ops,
@@ -818,12 +815,7 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
 	writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
 	writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
 		 port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
-
-	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
-		writelfl((pp->crqb_dma & 0xffffffff) | index,
-			 port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
-	else
-		writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+	writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
 
 	/*
 	 * initialize response queue
@@ -833,17 +825,38 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
 
 	WARN_ON(pp->crpb_dma & 0xff);
 	writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
-
-	if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
-		writelfl((pp->crpb_dma & 0xffffffff) | index,
-			 port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
-	else
-		writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
-
+	writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
 	writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
 		 port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 }
 
+static void mv_set_main_irq_mask(struct ata_host *host,
+				 u32 disable_bits, u32 enable_bits)
+{
+	struct mv_host_priv *hpriv = host->private_data;
+	u32 old_mask, new_mask;
+
+	old_mask = hpriv->main_irq_mask;
+	new_mask = (old_mask & ~disable_bits) | enable_bits;
+	if (new_mask != old_mask) {
+		hpriv->main_irq_mask = new_mask;
+		writelfl(new_mask, hpriv->main_irq_mask_addr);
+	}
+}
+
+static void mv_enable_port_irqs(struct ata_port *ap,
+				unsigned int port_bits)
+{
+	unsigned int shift, hardport, port = ap->port_no;
+	u32 disable_bits, enable_bits;
+
+	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
+
+	disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
+	enable_bits  = port_bits << shift;
+	mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
+}
+
 /**
  *      mv_start_dma - Enable eDMA engine
  *      @base: port base address
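For illustration, a minimal user-space model of the mask-caching idea behind mv_set_main_irq_mask() above: keep a soft copy of the mask, compute the new value from disable/enable bits, and touch the (simulated) register only when the value actually changes. The names and the plain-variable "register" here are hypothetical, not driver API:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t cached_mask;   /* plays the role of hpriv->main_irq_mask */
    static uint32_t hw_mask_reg;   /* stands in for the MMIO mask register */

    static void set_main_irq_mask(uint32_t disable_bits, uint32_t enable_bits)
    {
            uint32_t new_mask = (cached_mask & ~disable_bits) | enable_bits;

            if (new_mask != cached_mask) {  /* skip the slow register write if unchanged */
                    cached_mask = new_mask;
                    hw_mask_reg = new_mask;  /* the driver does writelfl() here */
            }
    }

    int main(void)
    {
            set_main_irq_mask(~0u, 0);               /* mask everything, as mv_init_host() does */
            set_main_irq_mask(0x3u << 2, 0x1u << 2); /* disable two port bits, re-enable one */
            printf("mask=%08x reg=%08x\n", cached_mask, hw_mask_reg);
            return 0;
    }

The soft copy also lets the interrupt handler test the mask without a second MMIO read, as seen in mv_interrupt() below.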
@@ -886,9 +899,11 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 	mv_edma_cfg(ap, want_ncq);
 
 	/* clear FIS IRQ Cause */
-	writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+	if (IS_GEN_IIE(hpriv))
+		writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
 
 	mv_set_edma_ptrs(port_mmio, hpriv, pp);
+	mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);
 
 	writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
 	pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
@@ -1231,7 +1246,7 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
 
 	cfg |= (1 << 23);	/* do not mask PM field in rx'd FIS */
 	cfg |= (1 << 22);	/* enab 4-entry host queue cache */
-	if (HAS_PCI(ap->host))
+	if (!IS_SOC(hpriv))
 		cfg |= (1 << 18);	/* enab early completion */
 	if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
 		cfg |= (1 << 17);	/* enab cut-thru (dis stor&forwrd) */
@@ -1341,6 +1356,7 @@ out_port_free_dma_mem:
 static void mv_port_stop(struct ata_port *ap)
 {
 	mv_stop_edma(ap);
+	mv_enable_port_irqs(ap, 0);
 	mv_port_free_dma_mem(ap);
 }
 
@@ -1582,6 +1598,7 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 		 * shadow block, etc registers.
 		 */
 		mv_stop_edma(ap);
+		mv_enable_port_irqs(ap, ERR_IRQ);
 		mv_pmp_select(ap, qc->dev->link->pmp);
 		return ata_sff_qc_issue(qc);
 	}
@@ -1670,6 +1687,18 @@ static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
 	}
 }
 
+static int mv_req_q_empty(struct ata_port *ap)
+{
+	void __iomem *port_mmio = mv_ap_base(ap);
+	u32 in_ptr, out_ptr;
+
+	in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
+			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+	out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
+			>> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+	return (in_ptr == out_ptr);	/* 1 == queue_is_empty */
+}
+
 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
 {
 	struct mv_port_priv *pp = ap->private_data;
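As a sketch, the emptiness test in mv_req_q_empty() reduces to comparing the hardware's in/out ring indices after shifting and masking; a circular queue is empty exactly when they are equal. The constants below are hypothetical stand-ins for EDMA_REQ_Q_PTR_SHIFT and MV_MAX_Q_DEPTH_MASK:

    #include <stdint.h>

    #define Q_PTR_SHIFT   5     /* hypothetical stand-in for EDMA_REQ_Q_PTR_SHIFT */
    #define Q_DEPTH_MASK  0x1f  /* hypothetical stand-in for MV_MAX_Q_DEPTH_MASK */

    /* Extract ring indices from raw register values; equal indices == empty. */
    static int req_q_empty(uint32_t in_reg, uint32_t out_reg)
    {
            uint32_t in_ptr  = (in_reg  >> Q_PTR_SHIFT) & Q_DEPTH_MASK;
            uint32_t out_ptr = (out_reg >> Q_PTR_SHIFT) & Q_DEPTH_MASK;

            return in_ptr == out_ptr;
    }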
@@ -1703,7 +1732,7 @@ static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
 			    ap->qc_active, failed_links,
 			    ap->nr_active_links);
 
-	if (ap->nr_active_links <= failed_links) {
+	if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
 		mv_process_crpb_entries(ap, pp);
 		mv_stop_edma(ap);
 		mv_eh_freeze(ap);
@@ -1812,6 +1841,7 @@ static void mv_err_intr(struct ata_port *ap)
 {
 	void __iomem *port_mmio = mv_ap_base(ap);
 	u32 edma_err_cause, eh_freeze_mask, serr = 0;
+	u32 fis_cause = 0;
 	struct mv_port_priv *pp = ap->private_data;
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	unsigned int action = 0, err_mask = 0;
@@ -1821,16 +1851,19 @@ static void mv_err_intr(struct ata_port *ap)
 
 	/*
 	 * Read and clear the SError and err_cause bits.
+	 * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
+	 * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
 	 */
 	sata_scr_read(&ap->link, SCR_ERROR, &serr);
 	sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
 
 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
+		fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+		writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+	}
 	writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-	ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n",
-			__func__, edma_err_cause, pp->pp_flags);
-
 	if (edma_err_cause & EDMA_ERR_DEV) {
 		/*
 		 * Device errors during FIS-based switching operation
@@ -1844,6 +1877,18 @@ static void mv_err_intr(struct ata_port *ap)
 	ata_ehi_clear_desc(ehi);
 	ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
 			  edma_err_cause, pp->pp_flags);
+
+	if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
+		ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
+		if (fis_cause & SATA_FIS_IRQ_AN) {
+			u32 ec = edma_err_cause &
+			       ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
+			sata_async_notification(ap);
+			if (!ec)
+				return; /* Just an AN; no need for the nukes */
+			ata_ehi_push_desc(ehi, "SDB notify");
+		}
+	}
 	/*
 	 * All generations share these EDMA error cause bits:
 	 */
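Sketched below, the decision the new block makes: when the FIS cause register reports an asynchronous notification, strip the transient/AN bits from edma_err_cause and bail out early only if nothing else remains. SATA_FIS_IRQ_AN matches the bit-9 value defined above; the other two bit positions are hypothetical:

    #include <stdint.h>

    #define FIS_IRQ_AN         (1u << 9)  /* matches SATA_FIS_IRQ_AN above */
    #define ERR_TRANS_IRQ_7    (1u << 7)  /* hypothetical value for EDMA_ERR_TRANS_IRQ_7 */
    #define ERR_IRQ_TRANSIENT  (1u << 8)  /* hypothetical value for EDMA_ERR_IRQ_TRANSIENT */

    /* Return 1 when the event is a pure async notification (no real error left). */
    static int pure_async_notify(uint32_t edma_err_cause, uint32_t fis_cause)
    {
            if (!(fis_cause & FIS_IRQ_AN))
                    return 0;
            return (edma_err_cause & ~(ERR_TRANS_IRQ_7 | ERR_IRQ_TRANSIENT)) == 0;
    }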
@@ -2162,20 +2207,20 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 	struct ata_host *host = dev_instance;
 	struct mv_host_priv *hpriv = host->private_data;
 	unsigned int handled = 0;
-	u32 main_irq_cause, main_irq_mask;
+	u32 main_irq_cause, pending_irqs;
 
 	spin_lock(&host->lock);
 	main_irq_cause = readl(hpriv->main_irq_cause_addr);
-	main_irq_mask = readl(hpriv->main_irq_mask_addr);
+	pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
 	/*
 	 * Deal with cases where we either have nothing pending, or have read
 	 * a bogus register value which can indicate HW removal or PCI fault.
 	 */
-	if ((main_irq_cause & main_irq_mask) && (main_irq_cause != 0xffffffffU)) {
-		if (unlikely((main_irq_cause & PCI_ERR) && HAS_PCI(host)))
+	if (pending_irqs && main_irq_cause != 0xffffffffU) {
+		if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
 			handled = mv_pci_error(host, hpriv->base);
 		else
-			handled = mv_host_intr(host, main_irq_cause);
+			handled = mv_host_intr(host, pending_irqs);
 	}
 	spin_unlock(&host->lock);
 	return IRQ_RETVAL(handled);
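A rough model of the fast path above: one MMIO read for the cause bits, an AND against the soft copy of the mask (no second register read), and a guard against the all-ones value a removed or faulted PCI device returns. Names are illustrative:

    #include <stdint.h>

    /* Returns nonzero when the interrupt was ours and got handled. */
    static int model_interrupt(uint32_t main_irq_cause, uint32_t cached_mask)
    {
            uint32_t pending = main_irq_cause & cached_mask;

            /* all-ones reads back from a hot-unplugged device or a PCI fault */
            if (!pending || main_irq_cause == 0xffffffffu)
                    return 0;

            /* ... dispatch 'pending' to per-port handlers here ... */
            return 1;
    }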
@@ -2373,7 +2418,6 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
 	ZERO(MV_PCI_DISC_TIMER);
 	ZERO(MV_PCI_MSI_TRIGGER);
 	writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
-	ZERO(PCI_HC_MAIN_IRQ_MASK_OFS);
 	ZERO(MV_PCI_SERR_MASK);
 	ZERO(hpriv->irq_cause_ofs);
 	ZERO(hpriv->irq_mask_ofs);
@@ -2495,7 +2539,7 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
 	int fix_phy_mode4 =
 		hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
-	u32 m2, tmp;
+	u32 m2, m3;
 
 	if (fix_phy_mode2) {
 		m2 = readl(port_mmio + PHY_MODE2);
@@ -2512,28 +2556,36 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
 		udelay(200);
 	}
 
-	/* who knows what this magic does */
-	tmp = readl(port_mmio + PHY_MODE3);
-	tmp &= ~0x7F800000;
-	tmp |= 0x2A800000;
-	writel(tmp, port_mmio + PHY_MODE3);
-
-	if (fix_phy_mode4) {
-		u32 m4;
-
-		m4 = readl(port_mmio + PHY_MODE4);
-
-		if (hp_flags & MV_HP_ERRATA_60X1B2)
-			tmp = readl(port_mmio + PHY_MODE3);
+	/*
+	 * Gen-II/IIe PHY_MODE3 errata RM#2:
+	 * Achieves better receiver noise performance than the h/w default:
+	 */
+	m3 = readl(port_mmio + PHY_MODE3);
+	m3 = (m3 & 0x1f) | (0x5555601 << 5);
 
-		/* workaround for errata FEr SATA#10 (part 1) */
-		m4 = (m4 & ~(1 << 1)) | (1 << 0);
+	/* Guideline 88F5182 (GL# SATA-S11) */
+	if (IS_SOC(hpriv))
+		m3 &= ~0x1c;
 
+	if (fix_phy_mode4) {
+		u32 m4 = readl(port_mmio + PHY_MODE4);
+		/*
+		 * Enforce reserved-bit restrictions on GenIIe devices only.
+		 * For earlier chipsets, force only the internal config field
+		 * (workaround for errata FEr SATA#10 part 1).
+		 */
+		if (IS_GEN_IIE(hpriv))
+			m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
+		else
+			m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
 		writel(m4, port_mmio + PHY_MODE4);
-
-		if (hp_flags & MV_HP_ERRATA_60X1B2)
-			writel(tmp, port_mmio + PHY_MODE3);
 	}
+	/*
+	 * Workaround for 60x1-B2 errata SATA#13:
+	 * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
+	 * so we must always rewrite PHY_MODE3 after PHY_MODE4.
+	 */
+	writel(m3, port_mmio + PHY_MODE3);
 
 	/* Revert values of pre-emphasis and signal amps to the saved ones */
 	m2 = readl(port_mmio + PHY_MODE2);
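The PHY_MODE4 handling above reduces to a guarded read-modify-write; here is a sketch using the same constants the patch defines (real-hardware behavior is of course not modeled):

    #include <stdint.h>

    #define RSVD_ZEROS  0x5de3fffa  /* PHY_MODE4_RSVD_ZEROS: Gen2e always-write-zero bits */
    #define RSVD_ONES   0x00000005  /* PHY_MODE4_RSVD_ONES:  Gen2e always-write-one bits */
    #define CFG_MASK    0x00000003  /* PHY_MODE4_CFG_MASK:   internal config field */
    #define CFG_VALUE   0x00000001  /* PHY_MODE4_CFG_VALUE */

    static uint32_t fixup_phy_mode4(uint32_t m4, int is_gen_iie)
    {
            if (is_gen_iie)  /* enforce reserved-bit restrictions on GenIIe */
                    return (m4 & ~RSVD_ZEROS) | RSVD_ONES;
            /* older chips: force only the 2-bit config field (FEr SATA#10 part 1) */
            return (m4 & ~CFG_MASK) | CFG_VALUE;
    }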
@@ -2728,6 +2780,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
 
 	rc = sata_link_hardreset(link, timing, deadline + extra,
 				 &online, NULL);
+	rc = online ? -EAGAIN : rc;
 	if (rc)
 		return rc;
 	sata_scr_read(link, SCR_STATUS, &sstatus);
@@ -2744,32 +2797,18 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
 
 static void mv_eh_freeze(struct ata_port *ap)
 {
-	struct mv_host_priv *hpriv = ap->host->private_data;
-	unsigned int shift, hardport, port = ap->port_no;
-	u32 main_irq_mask;
-
-	/* FIXME: handle coalescing completion events properly */
-
 	mv_stop_edma(ap);
-	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
-
-	/* disable assertion of portN err, done events */
-	main_irq_mask = readl(hpriv->main_irq_mask_addr);
-	main_irq_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
-	writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
+	mv_enable_port_irqs(ap, 0);
 }
 
 static void mv_eh_thaw(struct ata_port *ap)
 {
 	struct mv_host_priv *hpriv = ap->host->private_data;
-	unsigned int shift, hardport, port = ap->port_no;
+	unsigned int port = ap->port_no;
+	unsigned int hardport = mv_hardport_from_port(port);
 	void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
 	void __iomem *port_mmio = mv_ap_base(ap);
-	u32 main_irq_mask, hc_irq_cause;
-
-	/* FIXME: handle coalescing completion events properly */
-
-	MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
+	u32 hc_irq_cause;
 
 	/* clear EDMA errors on this port */
 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -2779,10 +2818,7 @@ static void mv_eh_thaw(struct ata_port *ap)
 	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
 	writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 
-	/* enable assertion of portN err, done events */
-	main_irq_mask = readl(hpriv->main_irq_mask_addr);
-	main_irq_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
-	writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
+	mv_enable_port_irqs(ap, ERR_IRQ);
 }
 
 /**
@@ -2840,7 +2876,7 @@ static unsigned int mv_in_pcix_mode(struct ata_host *host)
 	void __iomem *mmio = hpriv->base;
 	u32 reg;
 
-	if (!HAS_PCI(host) || !IS_PCIE(hpriv))
+	if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
 		return 0;	/* not PCI-X capable */
 	reg = readl(mmio + MV_PCI_MODE_OFS);
 	if ((reg & MV_PCI_MODE_MASK) == 0)
@@ -2967,10 +3003,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 		hp_flags |= MV_HP_CUT_THROUGH;
 
 		switch (pdev->revision) {
-		case 0x0:
-			hp_flags |= MV_HP_ERRATA_XX42A0;
-			break;
-		case 0x1:
+		case 0x2: /* Rev.B0: the first/only public release */
 			hp_flags |= MV_HP_ERRATA_60X1C0;
 			break;
 		default:
@@ -2982,7 +3015,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
 		break;
 	case chip_soc:
 		hpriv->ops = &mv_soc_ops;
-		hp_flags |= MV_HP_ERRATA_60X1C0;
+		hp_flags |= MV_HP_FLAG_SOC | MV_HP_ERRATA_60X1C0;
 		break;
 
 	default:
@@ -3026,16 +3059,16 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 	if (rc)
 		goto done;
 
-	if (HAS_PCI(host)) {
-		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
-		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
-	} else {
+	if (IS_SOC(hpriv)) {
 		hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
 		hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
+	} else {
+		hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
+		hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
 	}
 
 	/* global interrupt mask: 0 == mask everything */
-	writel(0, hpriv->main_irq_mask_addr);
+	mv_set_main_irq_mask(host, ~0, 0);
 
 	n_hc = mv_get_hc_count(host->ports[0]->flags);
 
@@ -3057,7 +3090,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 		mv_port_init(&ap->ioaddr, port_mmio);
 
 #ifdef CONFIG_PCI
-		if (HAS_PCI(host)) {
+		if (!IS_SOC(hpriv)) {
 			unsigned int offset = port_mmio - mmio;
 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
 			ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
@@ -3077,31 +3110,18 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 		writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
 	}
 
-	if (HAS_PCI(host)) {
+	if (!IS_SOC(hpriv)) {
 		/* Clear any currently outstanding host interrupt conditions */
 		writelfl(0, mmio + hpriv->irq_cause_ofs);
 
 		/* and unmask interrupt generation for host regs */
 		writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
-		if (IS_GEN_I(hpriv))
-			writelfl(~HC_MAIN_MASKED_IRQS_5,
-				 hpriv->main_irq_mask_addr);
-		else
-			writelfl(~HC_MAIN_MASKED_IRQS,
-				 hpriv->main_irq_mask_addr);
-
-		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
-			"PCI int cause/mask=0x%08x/0x%08x\n",
-			readl(hpriv->main_irq_cause_addr),
-			readl(hpriv->main_irq_mask_addr),
-			readl(mmio + hpriv->irq_cause_ofs),
-			readl(mmio + hpriv->irq_mask_ofs));
-	} else {
-		writelfl(~HC_MAIN_MASKED_IRQS_SOC,
-			 hpriv->main_irq_mask_addr);
-		VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
-			readl(hpriv->main_irq_cause_addr),
-			readl(hpriv->main_irq_mask_addr));
+
+		/*
+		 * enable only global host interrupts for now.
+		 * The per-port interrupts get done later as ports are set up.
+		 */
+		mv_set_main_irq_mask(host, 0, PCI_ERR);
 	}
 done:
 	return rc;