Diffstat (limited to 'drivers/ata/sata_mv.c')

 -rw-r--r--  drivers/ata/sata_mv.c | 279
 1 file changed, 160 insertions(+), 119 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index bb73b2222627..28092bc50146 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -72,7 +72,7 @@
 #include <linux/libata.h>
 
 #define DRV_NAME        "sata_mv"
-#define DRV_VERSION     "1.20"
+#define DRV_VERSION     "1.24"
 
 enum {
         /* BAR's are enumerated in terms of pci_resource_start() terms */
@@ -122,14 +122,17 @@ enum {
         /* Host Flags */
         MV_FLAG_DUAL_HC         = (1 << 30),  /* two SATA Host Controllers */
         MV_FLAG_IRQ_COALESCE    = (1 << 29),  /* IRQ coalescing capability */
-        /* SoC integrated controllers, no PCI interface */
-        MV_FLAG_SOC             = (1 << 28),
 
         MV_COMMON_FLAGS         = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY |
                                   ATA_FLAG_MMIO | ATA_FLAG_NO_ATAPI |
                                   ATA_FLAG_PIO_POLLING,
+
         MV_6XXX_FLAGS           = MV_FLAG_IRQ_COALESCE,
 
+        MV_GENIIE_FLAGS         = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+                                  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
+                                  ATA_FLAG_NCQ | ATA_FLAG_AN,
+
         CRQB_FLAG_READ          = (1 << 0),
         CRQB_TAG_SHIFT          = 1,
         CRQB_IOID_SHIFT         = 6,    /* CRQB Gen-II/IIE IO Id shift */
@@ -197,13 +200,6 @@ enum {
         HC_MAIN_RSVD            = (0x7f << 25),   /* bits 31-25 */
         HC_MAIN_RSVD_5          = (0x1fff << 19), /* bits 31-19 */
         HC_MAIN_RSVD_SOC        = (0x3fffffb << 6),     /* bits 31-9, 7-6 */
-        HC_MAIN_MASKED_IRQS     = (TRAN_LO_DONE | TRAN_HI_DONE |
-                                   PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
-                                   PORTS_0_7_COAL_DONE | GPIO_INT | TWSI_INT |
-                                   HC_MAIN_RSVD),
-        HC_MAIN_MASKED_IRQS_5   = (PORTS_0_3_COAL_DONE | PORTS_4_7_COAL_DONE |
-                                   HC_MAIN_RSVD_5),
-        HC_MAIN_MASKED_IRQS_SOC = (PORTS_0_3_COAL_DONE | HC_MAIN_RSVD_SOC),
 
         /* SATAHC registers */
         HC_CFG_OFS              = 0,
@@ -221,12 +217,18 @@ enum {
         SATA_STATUS_OFS         = 0x300,  /* ctrl, err regs follow status */
         SATA_ACTIVE_OFS         = 0x350,
         SATA_FIS_IRQ_CAUSE_OFS  = 0x364,
+        SATA_FIS_IRQ_AN         = (1 << 9),     /* async notification */
 
         LTMODE_OFS              = 0x30c,
         LTMODE_BIT8             = (1 << 8),     /* unknown, but necessary */
 
         PHY_MODE3               = 0x310,
         PHY_MODE4               = 0x314,
+        PHY_MODE4_CFG_MASK      = 0x00000003,   /* phy internal config field */
+        PHY_MODE4_CFG_VALUE     = 0x00000001,   /* phy internal config field */
+        PHY_MODE4_RSVD_ZEROS    = 0x5de3fffa,   /* Gen2e always write zeros */
+        PHY_MODE4_RSVD_ONES     = 0x00000005,   /* Gen2e always write ones */
+
         PHY_MODE2               = 0x330,
         SATA_IFCTL_OFS          = 0x344,
         SATA_TESTCTL_OFS        = 0x348,
@@ -357,12 +359,12 @@ enum {
         MV_HP_ERRATA_50XXB2     = (1 << 2),
         MV_HP_ERRATA_60X1B2     = (1 << 3),
         MV_HP_ERRATA_60X1C0     = (1 << 4),
-        MV_HP_ERRATA_XX42A0     = (1 << 5),
         MV_HP_GEN_I             = (1 << 6),     /* Generation I: 50xx */
         MV_HP_GEN_II            = (1 << 7),     /* Generation II: 60xx */
         MV_HP_GEN_IIE           = (1 << 8),     /* Generation IIE: 6042/7042 */
         MV_HP_PCIE              = (1 << 9),     /* PCIe bus/regs: 7042 */
         MV_HP_CUT_THROUGH       = (1 << 10),    /* can use EDMA cut-through */
+        MV_HP_FLAG_SOC          = (1 << 11),    /* SystemOnChip, no PCI */
 
         /* Port private flags (pp_flags) */
         MV_PP_FLAG_EDMA_EN      = (1 << 0),     /* is EDMA engine enabled? */
@@ -375,7 +377,7 @@ enum {
 #define IS_GEN_II(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_II)
 #define IS_GEN_IIE(hpriv) ((hpriv)->hp_flags & MV_HP_GEN_IIE)
 #define IS_PCIE(hpriv) ((hpriv)->hp_flags & MV_HP_PCIE)
-#define HAS_PCI(host) (!((host)->ports[0]->flags & MV_FLAG_SOC))
+#define IS_SOC(hpriv) ((hpriv)->hp_flags & MV_HP_FLAG_SOC)
 
 #define WINDOW_CTRL(i)          (0x20030 + ((i) << 4))
 #define WINDOW_BASE(i)          (0x20034 + ((i) << 4))
@@ -459,6 +461,7 @@ struct mv_port_signal {
 
 struct mv_host_priv {
         u32                     hp_flags;
+        u32                     main_irq_mask;
         struct mv_port_signal   signal[8];
         const struct mv_hw_ops  *ops;
         int                     n_ports;
@@ -640,25 +643,19 @@ static const struct ata_port_info mv_port_info[] = {
                 .port_ops       = &mv6_ops,
         },
         {  /* chip_6042 */
-                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-                                  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
-                                  ATA_FLAG_NCQ,
+                .flags          = MV_GENIIE_FLAGS,
                 .pio_mask       = 0x1f, /* pio0-4 */
                 .udma_mask      = ATA_UDMA6,
                 .port_ops       = &mv_iie_ops,
         },
         {  /* chip_7042 */
-                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-                                  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
-                                  ATA_FLAG_NCQ,
+                .flags          = MV_GENIIE_FLAGS,
                 .pio_mask       = 0x1f, /* pio0-4 */
                 .udma_mask      = ATA_UDMA6,
                 .port_ops       = &mv_iie_ops,
         },
         {  /* chip_soc */
-                .flags          = MV_COMMON_FLAGS | MV_6XXX_FLAGS |
-                                  ATA_FLAG_PMP | ATA_FLAG_ACPI_SATA |
-                                  ATA_FLAG_NCQ | MV_FLAG_SOC,
+                .flags          = MV_GENIIE_FLAGS,
                 .pio_mask       = 0x1f, /* pio0-4 */
                 .udma_mask      = ATA_UDMA6,
                 .port_ops       = &mv_iie_ops,
@@ -818,12 +815,7 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
         writel((pp->crqb_dma >> 16) >> 16, port_mmio + EDMA_REQ_Q_BASE_HI_OFS);
         writelfl((pp->crqb_dma & EDMA_REQ_Q_BASE_LO_MASK) | index,
                  port_mmio + EDMA_REQ_Q_IN_PTR_OFS);
-
-        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
-                writelfl((pp->crqb_dma & 0xffffffff) | index,
-                         port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
-        else
-                writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
+        writelfl(index, port_mmio + EDMA_REQ_Q_OUT_PTR_OFS);
 
         /*
          * initialize response queue
@@ -833,17 +825,38 @@ static void mv_set_edma_ptrs(void __iomem *port_mmio,
 
         WARN_ON(pp->crpb_dma & 0xff);
         writel((pp->crpb_dma >> 16) >> 16, port_mmio + EDMA_RSP_Q_BASE_HI_OFS);
-
-        if (hpriv->hp_flags & MV_HP_ERRATA_XX42A0)
-                writelfl((pp->crpb_dma & 0xffffffff) | index,
-                         port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
-        else
-                writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
-
+        writelfl(index, port_mmio + EDMA_RSP_Q_IN_PTR_OFS);
         writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) | index,
                  port_mmio + EDMA_RSP_Q_OUT_PTR_OFS);
 }
 
+static void mv_set_main_irq_mask(struct ata_host *host,
+                                 u32 disable_bits, u32 enable_bits)
+{
+        struct mv_host_priv *hpriv = host->private_data;
+        u32 old_mask, new_mask;
+
+        old_mask = hpriv->main_irq_mask;
+        new_mask = (old_mask & ~disable_bits) | enable_bits;
+        if (new_mask != old_mask) {
+                hpriv->main_irq_mask = new_mask;
+                writelfl(new_mask, hpriv->main_irq_mask_addr);
+        }
+}
+
+static void mv_enable_port_irqs(struct ata_port *ap,
+                                unsigned int port_bits)
+{
+        unsigned int shift, hardport, port = ap->port_no;
+        u32 disable_bits, enable_bits;
+
+        MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
+
+        disable_bits = (DONE_IRQ | ERR_IRQ) << shift;
+        enable_bits  = port_bits << shift;
+        mv_set_main_irq_mask(ap->host, disable_bits, enable_bits);
+}
+
 /**
  *      mv_start_dma - Enable eDMA engine
  *      @base: port base address
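The two new helpers above funnel every change to the chip's main IRQ mask
through one spot, working from a software copy (hpriv->main_irq_mask) so the
hardware register is written only when the mask actually changes. A minimal
userspace model of that pattern (the register write is a stub and the
starting mask value is made up, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    /* software copy of the hardware mask, like hpriv->main_irq_mask */
    static uint32_t cached_mask = 0x00000007;  /* made-up starting value */

    static void mmio_write(uint32_t v)         /* stand-in for writelfl() */
    {
            printf("MMIO write: %08x\n", v);
    }

    /* same read-modify-write choke point as mv_set_main_irq_mask() */
    static void set_main_irq_mask(uint32_t disable_bits, uint32_t enable_bits)
    {
            uint32_t new_mask = (cached_mask & ~disable_bits) | enable_bits;

            if (new_mask != cached_mask) {     /* skip MMIO when unchanged */
                    cached_mask = new_mask;
                    mmio_write(new_mask);
            }
    }

    int main(void)
    {
            set_main_irq_mask(~0u, 0);         /* mask everything: one write */
            set_main_irq_mask(~0u, 0);         /* no change: no write */
            set_main_irq_mask(0, 3u << 2);     /* unmask one port's DONE|ERR */
            return 0;
    }

Because the mask is cached, callers such as mv_enable_port_irqs() can be used
freely on hot paths without paying for an MMIO read-modify-write each time.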
@@ -886,9 +899,11 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
                 mv_edma_cfg(ap, want_ncq);
 
                 /* clear FIS IRQ Cause */
-                writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+                if (IS_GEN_IIE(hpriv))
+                        writelfl(0, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
 
                 mv_set_edma_ptrs(port_mmio, hpriv, pp);
+                mv_enable_port_irqs(ap, DONE_IRQ|ERR_IRQ);
 
                 writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
                 pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
@@ -1231,7 +1246,7 @@ static void mv_edma_cfg(struct ata_port *ap, int want_ncq)
 
                 cfg |= (1 << 23);       /* do not mask PM field in rx'd FIS */
                 cfg |= (1 << 22);       /* enab 4-entry host queue cache */
-                if (HAS_PCI(ap->host))
+                if (!IS_SOC(hpriv))
                         cfg |= (1 << 18); /* enab early completion */
                 if (hpriv->hp_flags & MV_HP_CUT_THROUGH)
                         cfg |= (1 << 17); /* enab cut-thru (dis stor&forwrd) */
@@ -1307,6 +1322,9 @@ static int mv_port_start(struct ata_port *ap)
                 goto out_port_free_dma_mem;
         memset(pp->crpb, 0, MV_CRPB_Q_SZ);
 
+        /* 6041/6081 Rev. "C0" (and newer) are okay with async notify */
+        if (hpriv->hp_flags & MV_HP_ERRATA_60X1C0)
+                ap->flags |= ATA_FLAG_AN;
         /*
          * For GEN_I, there's no NCQ, so we only allocate a single sg_tbl.
          * For later hardware, we need one unique sg_tbl per NCQ tag.
@@ -1341,6 +1359,7 @@ out_port_free_dma_mem:
 static void mv_port_stop(struct ata_port *ap)
 {
         mv_stop_edma(ap);
+        mv_enable_port_irqs(ap, 0);
         mv_port_free_dma_mem(ap);
 }
 
@@ -1576,12 +1595,31 @@ static unsigned int mv_qc_issue(struct ata_queued_cmd *qc)
 
         if ((qc->tf.protocol != ATA_PROT_DMA) &&
             (qc->tf.protocol != ATA_PROT_NCQ)) {
+                static int limit_warnings = 10;
+                /*
+                 * Errata SATA#16, SATA#24: warn if multiple DRQs expected.
+                 *
+                 * Someday, we might implement special polling workarounds
+                 * for these, but it all seems rather unnecessary since we
+                 * normally use only DMA for commands which transfer more
+                 * than a single block of data.
+                 *
+                 * Much of the time, this could just work regardless.
+                 * So for now, just log the incident, and allow the attempt.
+                 */
+                if (limit_warnings && (qc->nbytes / qc->sect_size) > 1) {
+                        --limit_warnings;
+                        ata_link_printk(qc->dev->link, KERN_WARNING, DRV_NAME
+                                        ": attempting PIO w/multiple DRQ: "
+                                        "this may fail due to h/w errata\n");
+                }
                 /*
                  * We're about to send a non-EDMA capable command to the
                  * port.  Turn off EDMA so there won't be problems accessing
                  * shadow block, etc registers.
                  */
                 mv_stop_edma(ap);
+                mv_enable_port_irqs(ap, ERR_IRQ);
                 mv_pmp_select(ap, qc->dev->link->pmp);
                 return ata_sff_qc_issue(qc);
         }
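The warning added here is self-rate-limited: a static countdown lets only the
first ten occurrences reach the log, so a workload that keeps issuing
multi-DRQ PIO cannot flood dmesg. A standalone sketch of the same idiom
(threshold and message as in the patch, everything else illustrative):

    #include <stdio.h>

    /* warn about a suspect condition, but only the first few times */
    static void warn_multi_drq_pio(unsigned int nbytes, unsigned int sect_size)
    {
            static int limit_warnings = 10;    /* persists across calls */

            if (limit_warnings && nbytes / sect_size > 1) {
                    --limit_warnings;
                    fprintf(stderr, "attempting PIO w/multiple DRQ: "
                            "this may fail due to h/w errata\n");
            }
    }

    int main(void)
    {
            /* 15 suspect commands, but at most 10 warnings reach the log */
            for (int i = 0; i < 15; i++)
                    warn_multi_drq_pio(4096, 512);
            return 0;
    }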
@@ -1670,6 +1708,18 @@ static void mv_pmp_eh_prep(struct ata_port *ap, unsigned int pmp_map)
         }
 }
 
+static int mv_req_q_empty(struct ata_port *ap)
+{
+        void __iomem *port_mmio = mv_ap_base(ap);
+        u32 in_ptr, out_ptr;
+
+        in_ptr  = (readl(port_mmio + EDMA_REQ_Q_IN_PTR_OFS)
+                        >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+        out_ptr = (readl(port_mmio + EDMA_REQ_Q_OUT_PTR_OFS)
+                        >> EDMA_REQ_Q_PTR_SHIFT) & MV_MAX_Q_DEPTH_MASK;
+        return (in_ptr == out_ptr);     /* 1 == queue_is_empty */
+}
+
 static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
 {
         struct mv_port_priv *pp = ap->private_data;
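mv_req_q_empty() reads the hardware's producer and consumer indices for the
EDMA request ring and declares the queue empty when they match, the standard
circular-buffer test. A sketch of the extraction and comparison (the
shift/mask values are hard-coded here rather than taken from the driver's
EDMA_REQ_Q_PTR_SHIFT and MV_MAX_Q_DEPTH_MASK):

    #include <stdint.h>
    #include <stdio.h>

    #define Q_PTR_SHIFT   5      /* position of the index field in the reg */
    #define Q_DEPTH_MASK  0x1f   /* ring of 32 entries */

    /* extract a ring index from a raw in/out pointer register value */
    static uint32_t ring_index(uint32_t reg)
    {
            return (reg >> Q_PTR_SHIFT) & Q_DEPTH_MASK;
    }

    static int req_q_empty(uint32_t in_reg, uint32_t out_reg)
    {
            return ring_index(in_reg) == ring_index(out_reg); /* 1 == empty */
    }

    int main(void)
    {
            printf("%d\n", req_q_empty(7u << Q_PTR_SHIFT, 7u << Q_PTR_SHIFT));
            printf("%d\n", req_q_empty(8u << Q_PTR_SHIFT, 7u << Q_PTR_SHIFT));
            return 0;
    }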
@@ -1703,7 +1753,7 @@ static int mv_handle_fbs_ncq_dev_err(struct ata_port *ap)
                         ap->qc_active, failed_links,
                         ap->nr_active_links);
 
-        if (ap->nr_active_links <= failed_links) {
+        if (ap->nr_active_links <= failed_links && mv_req_q_empty(ap)) {
                 mv_process_crpb_entries(ap, pp);
                 mv_stop_edma(ap);
                 mv_eh_freeze(ap);
@@ -1812,6 +1862,7 @@ static void mv_err_intr(struct ata_port *ap)
 {
         void __iomem *port_mmio = mv_ap_base(ap);
         u32 edma_err_cause, eh_freeze_mask, serr = 0;
+        u32 fis_cause = 0;
         struct mv_port_priv *pp = ap->private_data;
         struct mv_host_priv *hpriv = ap->host->private_data;
         unsigned int action = 0, err_mask = 0;
@@ -1821,16 +1872,19 @@ static void mv_err_intr(struct ata_port *ap)
 
         /*
          * Read and clear the SError and err_cause bits.
+         * For GenIIe, if EDMA_ERR_TRANS_IRQ_7 is set, we also must read/clear
+         * the FIS_IRQ_CAUSE register before clearing edma_err_cause.
          */
         sata_scr_read(&ap->link, SCR_ERROR, &serr);
         sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
 
         edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
+        if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
+                fis_cause = readl(port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+                writelfl(~fis_cause, port_mmio + SATA_FIS_IRQ_CAUSE_OFS);
+        }
         writelfl(~edma_err_cause, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-        ata_port_printk(ap, KERN_INFO, "%s: err_cause=%08x pp_flags=0x%x\n",
-                        __func__, edma_err_cause, pp->pp_flags);
-
         if (edma_err_cause & EDMA_ERR_DEV) {
                 /*
                  * Device errors during FIS-based switching operation
@@ -1844,6 +1898,18 @@ static void mv_err_intr(struct ata_port *ap)
         ata_ehi_clear_desc(ehi);
         ata_ehi_push_desc(ehi, "edma_err_cause=%08x pp_flags=%08x",
                           edma_err_cause, pp->pp_flags);
+
+        if (IS_GEN_IIE(hpriv) && (edma_err_cause & EDMA_ERR_TRANS_IRQ_7)) {
+                ata_ehi_push_desc(ehi, "fis_cause=%08x", fis_cause);
+                if (fis_cause & SATA_FIS_IRQ_AN) {
+                        u32 ec = edma_err_cause &
+                               ~(EDMA_ERR_TRANS_IRQ_7 | EDMA_ERR_IRQ_TRANSIENT);
+                        sata_async_notification(ap);
+                        if (!ec)
+                                return; /* Just an AN; no need for the nukes */
+                        ata_ehi_push_desc(ehi, "SDB notify");
+                }
+        }
         /*
          * All generations share these EDMA error cause bits:
          */
@@ -2162,20 +2228,20 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
         struct ata_host *host = dev_instance;
         struct mv_host_priv *hpriv = host->private_data;
         unsigned int handled = 0;
-        u32 main_irq_cause, main_irq_mask;
+        u32 main_irq_cause, pending_irqs;
 
         spin_lock(&host->lock);
         main_irq_cause = readl(hpriv->main_irq_cause_addr);
-        main_irq_mask = readl(hpriv->main_irq_mask_addr);
+        pending_irqs   = main_irq_cause & hpriv->main_irq_mask;
         /*
          * Deal with cases where we either have nothing pending, or have read
          * a bogus register value which can indicate HW removal or PCI fault.
          */
-        if ((main_irq_cause & main_irq_mask) && (main_irq_cause != 0xffffffffU)) {
-                if (unlikely((main_irq_cause & PCI_ERR) && HAS_PCI(host)))
+        if (pending_irqs && main_irq_cause != 0xffffffffU) {
+                if (unlikely((pending_irqs & PCI_ERR) && !IS_SOC(hpriv)))
                         handled = mv_pci_error(host, hpriv->base);
                 else
-                        handled = mv_host_intr(host, main_irq_cause);
+                        handled = mv_host_intr(host, pending_irqs);
         }
         spin_unlock(&host->lock);
         return IRQ_RETVAL(handled);
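Two hot-path changes here: the mask now comes from the cached
hpriv->main_irq_mask rather than a second MMIO read, and only bits that
survive the mask are passed along. The 0xffffffff screen is the usual guard
against a surprise-removed or faulted PCI device, whose reads return
all-ones. A condensed model of the filtering (names assumed, not driver API):

    #include <stdint.h>
    #include <stdio.h>

    /* decide which interrupt bits the handler should act on */
    static uint32_t pending_irqs(uint32_t main_irq_cause, uint32_t cached_mask)
    {
            /* all-ones usually means HW removal or a PCI fault, not events */
            if (main_irq_cause == 0xffffffffu)
                    return 0;
            return main_irq_cause & cached_mask; /* no MMIO read for the mask */
    }

    int main(void)
    {
            printf("%08x\n", pending_irqs(0x00000005u, 0x00000004u)); /* 4 */
            printf("%08x\n", pending_irqs(0xffffffffu, 0xffffffffu)); /* 0 */
            return 0;
    }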
@@ -2373,7 +2439,6 @@ static void mv_reset_pci_bus(struct ata_host *host, void __iomem *mmio)
         ZERO(MV_PCI_DISC_TIMER);
         ZERO(MV_PCI_MSI_TRIGGER);
         writel(0x000100ff, mmio + MV_PCI_XBAR_TMOUT_OFS);
-        ZERO(PCI_HC_MAIN_IRQ_MASK_OFS);
         ZERO(MV_PCI_SERR_MASK);
         ZERO(hpriv->irq_cause_ofs);
         ZERO(hpriv->irq_mask_ofs);
@@ -2495,7 +2560,7 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
         int fix_phy_mode4 =
                 hp_flags & (MV_HP_ERRATA_60X1B2 | MV_HP_ERRATA_60X1C0);
-        u32 m2, tmp;
+        u32 m2, m3;
 
         if (fix_phy_mode2) {
                 m2 = readl(port_mmio + PHY_MODE2);
@@ -2512,28 +2577,36 @@ static void mv6_phy_errata(struct mv_host_priv *hpriv, void __iomem *mmio,
                 udelay(200);
         }
 
-        /* who knows what this magic does */
-        tmp = readl(port_mmio + PHY_MODE3);
-        tmp &= ~0x7F800000;
-        tmp |= 0x2A800000;
-        writel(tmp, port_mmio + PHY_MODE3);
-
-        if (fix_phy_mode4) {
-                u32 m4;
-
-                m4 = readl(port_mmio + PHY_MODE4);
-
-                if (hp_flags & MV_HP_ERRATA_60X1B2)
-                        tmp = readl(port_mmio + PHY_MODE3);
+        /*
+         * Gen-II/IIe PHY_MODE3 errata RM#2:
+         * Achieves better receiver noise performance than the h/w default:
+         */
+        m3 = readl(port_mmio + PHY_MODE3);
+        m3 = (m3 & 0x1f) | (0x5555601 << 5);
 
-                /* workaround for errata FEr SATA#10 (part 1) */
-                m4 = (m4 & ~(1 << 1)) | (1 << 0);
+        /* Guideline 88F5182 (GL# SATA-S11) */
+        if (IS_SOC(hpriv))
+                m3 &= ~0x1c;
 
+        if (fix_phy_mode4) {
+                u32 m4 = readl(port_mmio + PHY_MODE4);
+                /*
+                 * Enforce reserved-bit restrictions on GenIIe devices only.
+                 * For earlier chipsets, force only the internal config field
+                 *  (workaround for errata FEr SATA#10 part 1).
+                 */
+                if (IS_GEN_IIE(hpriv))
+                        m4 = (m4 & ~PHY_MODE4_RSVD_ZEROS) | PHY_MODE4_RSVD_ONES;
+                else
+                        m4 = (m4 & ~PHY_MODE4_CFG_MASK) | PHY_MODE4_CFG_VALUE;
                 writel(m4, port_mmio + PHY_MODE4);
-
-                if (hp_flags & MV_HP_ERRATA_60X1B2)
-                        writel(tmp, port_mmio + PHY_MODE3);
         }
+        /*
+         * Workaround for 60x1-B2 errata SATA#13:
+         * Any write to PHY_MODE4 (above) may corrupt PHY_MODE3,
+         * so we must always rewrite PHY_MODE3 after PHY_MODE4.
+         */
+        writel(m3, port_mmio + PHY_MODE3);
 
         /* Revert values of pre-emphasis and signal amps to the saved ones */
         m2 = readl(port_mmio + PHY_MODE2);
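The rewritten PHY errata sequence computes m3 and m4 up front and deliberately
finishes with the PHY_MODE3 write, because on 60x1-B2 parts any PHY_MODE4
write can clobber PHY_MODE3 (errata SATA#13). A toy model of why the ordering
matters, with the corruption side effect simulated and all values illustrative:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t phy_mode3 = 0x12345678, phy_mode4 = 0x9abcdef0;

    static void write_phy_mode4(uint32_t v)
    {
            phy_mode4 = v;
            phy_mode3 ^= 0xff;  /* simulate the 60x1-B2 corruption side effect */
    }

    int main(void)
    {
            /* compute both values up front, as the patched code does... */
            uint32_t m3 = (phy_mode3 & 0x1f) | (0x5555601u << 5);
            uint32_t m4 = (phy_mode4 & ~0x00000003u) | 0x00000001u;

            /* ...then write MODE4 first, so the MODE3 write always lands last */
            write_phy_mode4(m4);
            phy_mode3 = m3;

            printf("m3=%08x m4=%08x\n", phy_mode3, phy_mode4);
            return 0;
    }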
@@ -2728,6 +2801,7 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
 
         rc = sata_link_hardreset(link, timing, deadline + extra,
                                  &online, NULL);
+        rc = online ? -EAGAIN : rc;
         if (rc)
                 return rc;
         sata_scr_read(link, SCR_STATUS, &sstatus);
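Forcing rc to -EAGAIN whenever the link came up online asks libata's error
handler to follow the hardreset with a softreset for device classification
(in libata-eh, a hardreset returning -EAGAIN on an online link counts as
needing a follow-up softreset). A simplified model of that decision, not the
actual libata code:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Simplified model of libata's follow-up decision: a hardreset that
     * returns -EAGAIN with the link online gets a follow-up softreset.
     */
    static bool followup_softreset_needed(int hardreset_rc, bool link_online)
    {
            if (!link_online)
                    return false;
            return hardreset_rc == -EAGAIN;
    }

    int main(void)
    {
            bool online = true;              /* link came up after hardreset */
            int rc = online ? -EAGAIN : 0;   /* the patch's rc rewrite */

            printf("follow-up softreset: %s\n",
                   followup_softreset_needed(rc, online) ? "yes" : "no");
            return 0;
    }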
@@ -2744,32 +2818,18 @@ static int mv_hardreset(struct ata_link *link, unsigned int *class,
 
 static void mv_eh_freeze(struct ata_port *ap)
 {
-        struct mv_host_priv *hpriv = ap->host->private_data;
-        unsigned int shift, hardport, port = ap->port_no;
-        u32 main_irq_mask;
-
-        /* FIXME: handle coalescing completion events properly */
-
         mv_stop_edma(ap);
-        MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
-
-        /* disable assertion of portN err, done events */
-        main_irq_mask = readl(hpriv->main_irq_mask_addr);
-        main_irq_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
-        writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
+        mv_enable_port_irqs(ap, 0);
 }
 
 static void mv_eh_thaw(struct ata_port *ap)
 {
         struct mv_host_priv *hpriv = ap->host->private_data;
-        unsigned int shift, hardport, port = ap->port_no;
+        unsigned int port = ap->port_no;
+        unsigned int hardport = mv_hardport_from_port(port);
         void __iomem *hc_mmio = mv_hc_base_from_port(hpriv->base, port);
         void __iomem *port_mmio = mv_ap_base(ap);
-        u32 main_irq_mask, hc_irq_cause;
-
-        /* FIXME: handle coalescing completion events properly */
-
-        MV_PORT_TO_SHIFT_AND_HARDPORT(port, shift, hardport);
+        u32 hc_irq_cause;
 
         /* clear EDMA errors on this port */
         writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
@@ -2779,10 +2839,7 @@ static void mv_eh_thaw(struct ata_port *ap)
         hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hardport);
         writelfl(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 
-        /* enable assertion of portN err, done events */
-        main_irq_mask = readl(hpriv->main_irq_mask_addr);
-        main_irq_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
-        writelfl(main_irq_mask, hpriv->main_irq_mask_addr);
+        mv_enable_port_irqs(ap, ERR_IRQ);
 }
 
 /**
@@ -2840,7 +2897,7 @@ static unsigned int mv_in_pcix_mode(struct ata_host *host)
         void __iomem *mmio = hpriv->base;
         u32 reg;
 
-        if (!HAS_PCI(host) || !IS_PCIE(hpriv))
+        if (IS_SOC(hpriv) || !IS_PCIE(hpriv))
                 return 0;       /* not PCI-X capable */
         reg = readl(mmio + MV_PCI_MODE_OFS);
         if ((reg & MV_PCI_MODE_MASK) == 0)
@@ -2967,10 +3024,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
                         hp_flags |= MV_HP_CUT_THROUGH;
 
                 switch (pdev->revision) {
-                case 0x0:
-                        hp_flags |= MV_HP_ERRATA_XX42A0;
-                        break;
-                case 0x1:
+                case 0x2: /* Rev.B0: the first/only public release */
                         hp_flags |= MV_HP_ERRATA_60X1C0;
                         break;
                 default:
@@ -2982,7 +3036,7 @@ static int mv_chip_id(struct ata_host *host, unsigned int board_idx)
                 break;
         case chip_soc:
                 hpriv->ops = &mv_soc_ops;
-                hp_flags |= MV_HP_ERRATA_60X1C0;
+                hp_flags |= MV_HP_FLAG_SOC | MV_HP_ERRATA_60X1C0;
                 break;
 
         default:
@@ -3026,16 +3080,16 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
         if (rc)
                 goto done;
 
-        if (HAS_PCI(host)) {
-                hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
-                hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
-        } else {
+        if (IS_SOC(hpriv)) {
                 hpriv->main_irq_cause_addr = mmio + SOC_HC_MAIN_IRQ_CAUSE_OFS;
                 hpriv->main_irq_mask_addr  = mmio + SOC_HC_MAIN_IRQ_MASK_OFS;
+        } else {
+                hpriv->main_irq_cause_addr = mmio + PCI_HC_MAIN_IRQ_CAUSE_OFS;
+                hpriv->main_irq_mask_addr  = mmio + PCI_HC_MAIN_IRQ_MASK_OFS;
         }
 
         /* global interrupt mask: 0 == mask everything */
-        writel(0, hpriv->main_irq_mask_addr);
+        mv_set_main_irq_mask(host, ~0, 0);
 
         n_hc = mv_get_hc_count(host->ports[0]->flags);
 
@@ -3057,7 +3111,7 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
                 mv_port_init(&ap->ioaddr, port_mmio);
 
 #ifdef CONFIG_PCI
-                if (HAS_PCI(host)) {
+                if (!IS_SOC(hpriv)) {
                         unsigned int offset = port_mmio - mmio;
                         ata_port_pbar_desc(ap, MV_PRIMARY_BAR, -1, "mmio");
                         ata_port_pbar_desc(ap, MV_PRIMARY_BAR, offset, "port");
@@ -3077,31 +3131,18 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
                 writelfl(0, hc_mmio + HC_IRQ_CAUSE_OFS);
         }
 
-        if (HAS_PCI(host)) {
+        if (!IS_SOC(hpriv)) {
                 /* Clear any currently outstanding host interrupt conditions */
                 writelfl(0, mmio + hpriv->irq_cause_ofs);
 
                 /* and unmask interrupt generation for host regs */
                 writelfl(hpriv->unmask_all_irqs, mmio + hpriv->irq_mask_ofs);
-                if (IS_GEN_I(hpriv))
-                        writelfl(~HC_MAIN_MASKED_IRQS_5,
-                                 hpriv->main_irq_mask_addr);
-                else
-                        writelfl(~HC_MAIN_MASKED_IRQS,
-                                 hpriv->main_irq_mask_addr);
-
-                VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x "
-                        "PCI int cause/mask=0x%08x/0x%08x\n",
-                        readl(hpriv->main_irq_cause_addr),
-                        readl(hpriv->main_irq_mask_addr),
-                        readl(mmio + hpriv->irq_cause_ofs),
-                        readl(mmio + hpriv->irq_mask_ofs));
-        } else {
-                writelfl(~HC_MAIN_MASKED_IRQS_SOC,
-                         hpriv->main_irq_mask_addr);
-                VPRINTK("HC MAIN IRQ cause/mask=0x%08x/0x%08x\n",
-                        readl(hpriv->main_irq_cause_addr),
-                        readl(hpriv->main_irq_mask_addr));
+
+                /*
+                 * enable only global host interrupts for now.
+                 * The per-port interrupts get done later as ports are set up.
+                 */
+                mv_set_main_irq_mask(host, 0, PCI_ERR);
         }
 done:
         return rc;
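Interrupt enabling at init time is now staged through the same cached-mask
helper: mask everything, unmask only the global PCI_ERR bit, and let
mv_enable_port_irqs() add per-port DONE/ERR bits as each port comes up. A
sketch of the staging (bit positions and the starting value are made up, and
the register write is a stub):

    #include <stdint.h>
    #include <stdio.h>

    #define PCI_ERR   (1u << 18)  /* illustrative bit position */
    #define DONE_IRQ  (1u << 0)
    #define ERR_IRQ   (1u << 1)

    static uint32_t cached_mask = 0xdeadbeef;  /* unknown power-on state */

    static void set_main_irq_mask(uint32_t disable, uint32_t enable)
    {
            uint32_t next = (cached_mask & ~disable) | enable;

            if (next != cached_mask) {
                    cached_mask = next;
                    printf("mask -> %08x\n", next); /* stand-in for writelfl() */
            }
    }

    int main(void)
    {
            set_main_irq_mask(~0u, 0);        /* init: mask everything */
            set_main_irq_mask(0, PCI_ERR);    /* then global errors only */
            /* later, per port (shift of 2 illustrative) */
            set_main_irq_mask(0, (DONE_IRQ | ERR_IRQ) << 2);
            return 0;
    }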