author		Mark Lord <liml@rtr.ca>	2008-04-19 14:43:42 -0400
committer	Jeff Garzik <jgarzik@redhat.com>	2008-04-25 01:11:40 -0400
commit		352fab701ca4753dd005b67ce5e512be944eb591 (patch)
tree		cefd4a340a39bd48aaaafd716dcdee98938eb0f7 /drivers/ata/sata_mv.c
parent		01ce2601e4ba354fe1e25bb940817570d0c8ed4f (diff)
sata_mv more cosmetics

More cosmetic cleanups prior to the interrupt/error handling logic changes.

Signed-off-by: Mark Lord <mlord@pobox.com>
Signed-off-by: Jeff Garzik <jgarzik@redhat.com>
Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--	drivers/ata/sata_mv.c	131
1 file changed, 64 insertions(+), 67 deletions(-)
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index d52ce1188327..c01d6bf4c593 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -124,11 +124,11 @@ enum {
 	MV_MAX_SG_CT		= 256,
 	MV_SG_TBL_SZ		= (16 * MV_MAX_SG_CT),
 
-	MV_PORTS_PER_HC		= 4,
-	/* == (port / MV_PORTS_PER_HC) to determine HC from 0-7 port */
+	/* Determine hc from 0-7 port: hc = port >> MV_PORT_HC_SHIFT */
 	MV_PORT_HC_SHIFT	= 2,
-	/* == (port % MV_PORTS_PER_HC) to determine hard port from 0-7 port */
-	MV_PORT_MASK		= 3,
+	MV_PORTS_PER_HC		= (1 << MV_PORT_HC_SHIFT), /* 4 */
+	/* Determine hc port from 0-7 port: hardport = port & MV_PORT_MASK */
+	MV_PORT_MASK		= (MV_PORTS_PER_HC - 1), /* 3 */
 
 	/* Host Flags */
 	MV_FLAG_DUAL_HC		= (1 << 30),  /* two SATA Host Controllers */
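[Editorial note] A minimal standalone C sketch (illustrative only, not driver code) of the port decomposition that this hunk documents; the three enum values are copied from the hunk, everything else is hypothetical harness:

#include <stdio.h>

enum {
	MV_PORT_HC_SHIFT = 2,
	MV_PORTS_PER_HC  = (1 << MV_PORT_HC_SHIFT),	/* 4 */
	MV_PORT_MASK     = (MV_PORTS_PER_HC - 1),	/* 3 */
};

int main(void)
{
	unsigned int port;

	for (port = 0; port < 8; port++) {
		unsigned int hc = port >> MV_PORT_HC_SHIFT;	/* == port / 4 */
		unsigned int hardport = port & MV_PORT_MASK;	/* == port % 4 */
		printf("port %u -> hc %u, hardport %u\n", port, hc, hardport);
	}
	return 0;
}

Deriving MV_PORTS_PER_HC and MV_PORT_MASK from MV_PORT_HC_SHIFT keeps the three constants consistent by construction if the shift ever changes.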
@@ -188,8 +188,8 @@ enum {
 	HC_MAIN_IRQ_MASK_OFS	= 0x1d64,
 	HC_SOC_MAIN_IRQ_CAUSE_OFS = 0x20020,
 	HC_SOC_MAIN_IRQ_MASK_OFS = 0x20024,
-	PORT0_ERR		= (1 << 0),	/* shift by port # */
-	PORT0_DONE		= (1 << 1),	/* shift by port # */
+	ERR_IRQ			= (1 << 0),	/* shift by port # */
+	DONE_IRQ		= (1 << 1),	/* shift by port # */
 	HC0_IRQ_PEND		= 0x1ff,	/* bits 0-8 = HC0's ports */
 	HC_SHIFT		= 9,		/* bits 9-17 = HC1's ports */
 	PCI_ERR			= (1 << 18),
@@ -215,8 +215,8 @@ enum {
 	HC_CFG_OFS		= 0,
 
 	HC_IRQ_CAUSE_OFS	= 0x14,
-	CRPB_DMA_DONE		= (1 << 0),	/* shift by port # */
-	HC_IRQ_COAL		= (1 << 4),	/* IRQ coalescing */
+	DMA_IRQ			= (1 << 0),	/* shift by port # */
+	HC_COAL_IRQ		= (1 << 4),	/* IRQ coalescing */
 	DEV_IRQ			= (1 << 8),	/* shift by port # */
 
 	/* Shadow block registers */
@@ -349,6 +349,8 @@ enum {
 	EDMA_IORDY_TMOUT	= 0x34,
 	EDMA_ARB_CFG		= 0x38,
 
+	GEN_II_NCQ_MAX_SECTORS	= 256,	/* max sects/io on Gen2 w/NCQ */
+
 	/* Host private flags (hp_flags) */
 	MV_HP_FLAG_MSI		= (1 << 0),
 	MV_HP_ERRATA_50XXB0	= (1 << 1),
@@ -722,11 +724,6 @@ static inline void writelfl(unsigned long data, void __iomem *addr)
 	(void) readl(addr);	/* flush to avoid PCI posted write */
 }
 
-static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
-{
-	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
-}
-
 static inline unsigned int mv_hc_from_port(unsigned int port)
 {
 	return port >> MV_PORT_HC_SHIFT;
@@ -737,6 +734,11 @@ static inline unsigned int mv_hardport_from_port(unsigned int port)
 	return port & MV_PORT_MASK;
 }
 
+static inline void __iomem *mv_hc_base(void __iomem *base, unsigned int hc)
+{
+	return (base + MV_SATAHC0_REG_BASE + (hc * MV_SATAHC_REG_SZ));
+}
+
 static inline void __iomem *mv_hc_base_from_port(void __iomem *base,
 						 unsigned int port)
 {
@@ -837,9 +839,9 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 	}
 	if (!(pp->pp_flags & MV_PP_FLAG_EDMA_EN)) {
 		struct mv_host_priv *hpriv = ap->host->private_data;
-		int hard_port = mv_hardport_from_port(ap->port_no);
+		int hardport = mv_hardport_from_port(ap->port_no);
 		void __iomem *hc_mmio = mv_hc_base_from_port(
-					mv_host_base(ap->host), hard_port);
+					mv_host_base(ap->host), hardport);
 		u32 hc_irq_cause, ipending;
 
 		/* clear EDMA event indicators, if any */
@@ -847,8 +849,7 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 
 		/* clear EDMA interrupt indicator, if any */
 		hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-		ipending = (DEV_IRQ << hard_port) |
-				(CRPB_DMA_DONE << hard_port);
+		ipending = (DEV_IRQ | DMA_IRQ) << hardport;
 		if (hc_irq_cause & ipending) {
 			writelfl(hc_irq_cause & ~ipending,
 				 hc_mmio + HC_IRQ_CAUSE_OFS);
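[Editorial note] The fold above relies on left-shift distributing over OR: (DEV_IRQ << p) | (DMA_IRQ << p) == (DEV_IRQ | DMA_IRQ) << p. A standalone C sketch checking that identity for the four hard ports (the two enum values are copied from the hunks above; the harness itself is illustrative only):

#include <assert.h>
#include <stdio.h>

enum {
	DMA_IRQ = (1 << 0),	/* shift by port # */
	DEV_IRQ = (1 << 8),	/* shift by port # */
};

int main(void)
{
	unsigned int hardport;

	for (hardport = 0; hardport < 4; hardport++) {
		unsigned int old_form = (DEV_IRQ << hardport) | (DMA_IRQ << hardport);
		unsigned int new_form = (DEV_IRQ | DMA_IRQ) << hardport;

		assert(old_form == new_form);	/* shift distributes over OR */
		printf("hardport %u: ipending = 0x%03x\n", hardport, new_form);
	}
	return 0;
}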
@@ -864,7 +865,6 @@ static void mv_start_dma(struct ata_port *ap, void __iomem *port_mmio,
 		writelfl(EDMA_EN, port_mmio + EDMA_CMD_OFS);
 		pp->pp_flags |= MV_PP_FLAG_EDMA_EN;
 	}
-	WARN_ON(!(EDMA_EN & readl(port_mmio + EDMA_CMD_OFS)));
 }
 
 /**
@@ -1036,10 +1036,16 @@ static void mv6_dev_config(struct ata_device *adev)
 	 * See mv_qc_prep() for more info.
 	 */
 	if (adev->flags & ATA_DFLAG_NCQ) {
-		if (sata_pmp_attached(adev->link->ap))
+		if (sata_pmp_attached(adev->link->ap)) {
 			adev->flags &= ~ATA_DFLAG_NCQ;
-		else if (adev->max_sectors > ATA_MAX_SECTORS)
-			adev->max_sectors = ATA_MAX_SECTORS;
+			ata_dev_printk(adev, KERN_INFO,
+				"NCQ disabled for command-based switching\n");
+		} else if (adev->max_sectors > GEN_II_NCQ_MAX_SECTORS) {
+			adev->max_sectors = GEN_II_NCQ_MAX_SECTORS;
+			ata_dev_printk(adev, KERN_INFO,
+				"max_sectors limited to %u for NCQ\n",
+				adev->max_sectors);
+		}
 	}
 }
 
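[Editorial note] A minimal sketch of the new clamp, assuming only the GEN_II_NCQ_MAX_SECTORS value from the hunk above; the helper name and harness are hypothetical:

#include <stdio.h>

#define GEN_II_NCQ_MAX_SECTORS	256	/* max sects/io on Gen2 w/NCQ */

static unsigned int clamp_ncq_sectors(unsigned int max_sectors)
{
	/* cap per-command transfer size when NCQ is in use on Gen2 */
	if (max_sectors > GEN_II_NCQ_MAX_SECTORS)
		return GEN_II_NCQ_MAX_SECTORS;
	return max_sectors;
}

int main(void)
{
	printf("%u -> %u\n", 65535u, clamp_ncq_sectors(65535));	/* clamped to 256 */
	printf("%u -> %u\n", 128u, clamp_ncq_sectors(128));	/* unchanged */
	return 0;
}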
@@ -1493,12 +1499,11 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 
 	edma_err_cause = readl(port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
-	ata_ehi_push_desc(ehi, "edma_err 0x%08x", edma_err_cause);
+	ata_ehi_push_desc(ehi, "edma_err_cause=%08x", edma_err_cause);
 
 	/*
-	 * all generations share these EDMA error cause bits
+	 * All generations share these EDMA error cause bits:
 	 */
-
 	if (edma_err_cause & EDMA_ERR_DEV)
 		err_mask |= AC_ERR_DEV;
 	if (edma_err_cause & (EDMA_ERR_D_PAR | EDMA_ERR_PRD_PAR |
@@ -1515,23 +1520,22 @@ static void mv_err_intr(struct ata_port *ap, struct ata_queued_cmd *qc)
 		action |= ATA_EH_RESET;
 	}
 
+	/*
+	 * Gen-I has a different SELF_DIS bit,
+	 * different FREEZE bits, and no SERR bit:
+	 */
 	if (IS_GEN_I(hpriv)) {
 		eh_freeze_mask = EDMA_EH_FREEZE_5;
-
 		if (edma_err_cause & EDMA_ERR_SELF_DIS_5) {
-			pp = ap->private_data;
 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
 			ata_ehi_push_desc(ehi, "EDMA self-disable");
 		}
 	} else {
 		eh_freeze_mask = EDMA_EH_FREEZE;
-
 		if (edma_err_cause & EDMA_ERR_SELF_DIS) {
-			pp = ap->private_data;
 			pp->pp_flags &= ~MV_PP_FLAG_EDMA_EN;
 			ata_ehi_push_desc(ehi, "EDMA self-disable");
 		}
-
 		if (edma_err_cause & EDMA_ERR_SERR) {
 			sata_scr_read(&ap->link, SCR_ERROR, &serr);
 			sata_scr_write_flush(&ap->link, SCR_ERROR, serr);
@@ -1644,6 +1648,7 @@ static void mv_intr_edma(struct ata_port *ap)
 		pp->resp_idx++;
 	}
 
+	/* Update the software queue position index in hardware */
 	if (work_done)
 		writelfl((pp->crpb_dma & EDMA_RSP_Q_BASE_LO_MASK) |
 			 (out_index << EDMA_RSP_Q_PTR_SHIFT),
@@ -1696,7 +1701,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 	for (port = port0; port < last_port; port++) {
 		struct ata_port *ap = host->ports[port];
 		struct mv_port_priv *pp;
-		int have_err_bits, hard_port, shift;
+		int have_err_bits, hardport, shift;
 
 		if ((!ap) || (ap->flags & ATA_FLAG_DISABLED))
 			continue;
@@ -1707,7 +1712,7 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 		if (port >= MV_PORTS_PER_HC)
 			shift++;	/* skip bit 8 in the HC Main IRQ reg */
 
-		have_err_bits = ((PORT0_ERR << shift) & relevant);
+		have_err_bits = ((ERR_IRQ << shift) & relevant);
 
 		if (unlikely(have_err_bits)) {
 			struct ata_queued_cmd *qc;
@@ -1720,13 +1725,13 @@ static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 			continue;
 		}
 
-		hard_port = mv_hardport_from_port(port);	/* range 0..3 */
+		hardport = mv_hardport_from_port(port);	/* range 0..3 */
 
 		if (pp->pp_flags & MV_PP_FLAG_EDMA_EN) {
-			if ((CRPB_DMA_DONE << hard_port) & hc_irq_cause)
+			if ((DMA_IRQ << hardport) & hc_irq_cause)
 				mv_intr_edma(ap);
 		} else {
-			if ((DEV_IRQ << hard_port) & hc_irq_cause)
+			if ((DEV_IRQ << hardport) & hc_irq_cause)
 				mv_intr_pio(ap);
 		}
 	}
@@ -1793,30 +1798,28 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 	struct mv_host_priv *hpriv = host->private_data;
 	unsigned int hc, handled = 0, n_hcs;
 	void __iomem *mmio = hpriv->base;
-	u32 irq_stat, irq_mask;
+	u32 main_cause, main_mask;
 
-	/* Note to self: &host->lock == &ap->host->lock == ap->lock */
 	spin_lock(&host->lock);
-
-	irq_stat = readl(hpriv->main_cause_reg_addr);
-	irq_mask = readl(hpriv->main_mask_reg_addr);
-
-	/* check the cases where we either have nothing pending or have read
-	 * a bogus register value which can indicate HW removal or PCI fault
+	main_cause = readl(hpriv->main_cause_reg_addr);
+	main_mask  = readl(hpriv->main_mask_reg_addr);
+	/*
+	 * Deal with cases where we either have nothing pending, or have read
+	 * a bogus register value which can indicate HW removal or PCI fault.
 	 */
-	if (!(irq_stat & irq_mask) || (0xffffffffU == irq_stat))
+	if (!(main_cause & main_mask) || (main_cause == 0xffffffffU))
 		goto out_unlock;
 
 	n_hcs = mv_get_hc_count(host->ports[0]->flags);
 
-	if (unlikely((irq_stat & PCI_ERR) && HAS_PCI(host))) {
+	if (unlikely((main_cause & PCI_ERR) && HAS_PCI(host))) {
 		mv_pci_error(host, mmio);
 		handled = 1;
 		goto out_unlock;	/* skip all other HC irq handling */
 	}
 
 	for (hc = 0; hc < n_hcs; hc++) {
-		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
+		u32 relevant = main_cause & (HC0_IRQ_PEND << (hc * HC_SHIFT));
 		if (relevant) {
 			mv_host_intr(host, relevant, hc);
 			handled = 1;
@@ -1825,7 +1828,6 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance)
 
 out_unlock:
 	spin_unlock(&host->lock);
-
 	return IRQ_RETVAL(handled);
 }
 
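[Editorial note] A standalone C sketch (illustrative only; function name and harness are hypothetical) of the early-exit test that the rewritten mv_interrupt() performs after reading the main cause/mask registers: bail out when nothing unmasked is pending, or when the read returned all-ones, which typically signals hardware removal or a PCI fault.

#include <stdbool.h>
#include <stdio.h>

static bool irq_worth_handling(unsigned int main_cause, unsigned int main_mask)
{
	if (main_cause == 0xffffffffU)	/* bogus read: HW gone / PCI fault */
		return false;
	return (main_cause & main_mask) != 0;	/* anything unmasked pending? */
}

int main(void)
{
	printf("%d\n", irq_worth_handling(0x00000000, 0x000001ff));	/* 0: idle */
	printf("%d\n", irq_worth_handling(0xffffffff, 0x000001ff));	/* 0: bogus */
	printf("%d\n", irq_worth_handling(0x00000003, 0x000001ff));	/* 1: pending */
	return 0;
}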
@@ -2410,8 +2412,8 @@ static void mv_eh_freeze(struct ata_port *ap)
 {
 	struct mv_host_priv *hpriv = ap->host->private_data;
 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
-	u32 tmp, mask;
 	unsigned int shift;
+	u32 main_mask;
 
 	/* FIXME: handle coalescing completion events properly */
 
@@ -2419,11 +2421,10 @@ static void mv_eh_freeze(struct ata_port *ap)
 	if (hc > 0)
 		shift++;
 
-	mask = 0x3 << shift;
-
 	/* disable assertion of portN err, done events */
-	tmp = readl(hpriv->main_mask_reg_addr);
-	writelfl(tmp & ~mask, hpriv->main_mask_reg_addr);
+	main_mask = readl(hpriv->main_mask_reg_addr);
+	main_mask &= ~((DONE_IRQ | ERR_IRQ) << shift);
+	writelfl(main_mask, hpriv->main_mask_reg_addr);
 }
 
 static void mv_eh_thaw(struct ata_port *ap)
@@ -2433,8 +2434,8 @@ static void mv_eh_thaw(struct ata_port *ap)
 	unsigned int hc = (ap->port_no > 3) ? 1 : 0;
 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
 	void __iomem *port_mmio = mv_ap_base(ap);
-	u32 tmp, mask, hc_irq_cause;
 	unsigned int shift, hc_port_no = ap->port_no;
+	u32 main_mask, hc_irq_cause;
 
 	/* FIXME: handle coalescing completion events properly */
 
@@ -2444,20 +2445,18 @@ static void mv_eh_thaw(struct ata_port *ap)
 		hc_port_no -= 4;
 	}
 
-	mask = 0x3 << shift;
-
 	/* clear EDMA errors on this port */
 	writel(0, port_mmio + EDMA_ERR_IRQ_CAUSE_OFS);
 
 	/* clear pending irq events */
 	hc_irq_cause = readl(hc_mmio + HC_IRQ_CAUSE_OFS);
-	hc_irq_cause &= ~(1 << hc_port_no);	/* clear CRPB-done */
-	hc_irq_cause &= ~(1 << (hc_port_no + 8)); /* clear Device int */
+	hc_irq_cause &= ~((DEV_IRQ | DMA_IRQ) << hc_port_no);
 	writel(hc_irq_cause, hc_mmio + HC_IRQ_CAUSE_OFS);
 
 	/* enable assertion of portN err, done events */
-	tmp = readl(hpriv->main_mask_reg_addr);
-	writelfl(tmp | mask, hpriv->main_mask_reg_addr);
+	main_mask = readl(hpriv->main_mask_reg_addr);
+	main_mask |= ((DONE_IRQ | ERR_IRQ) << shift);
+	writelfl(main_mask, hpriv->main_mask_reg_addr);
 }
 
 /**
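[Editorial note] With the magic 0x3 gone, freeze and thaw are exact inverses over the same named bits. A standalone C sketch of the mask arithmetic, assuming the two-bits-per-port layout with bit 8 skipped for ports on the second host controller (the shift computation mirrors the one in these functions; the helper name and harness are hypothetical):

#include <stdio.h>

enum {
	ERR_IRQ  = (1 << 0),	/* shift by port # */
	DONE_IRQ = (1 << 1),	/* shift by port # */
};

static unsigned int port_shift(unsigned int port_no)
{
	unsigned int shift = port_no * 2;	/* two bits per port */

	if (port_no > 3)	/* HC1 ports skip bit 8 in the main IRQ reg */
		shift++;
	return shift;
}

int main(void)
{
	unsigned int main_mask = 0x3ffff;	/* hypothetical starting mask */
	unsigned int port = 5;
	unsigned int bits = (DONE_IRQ | ERR_IRQ) << port_shift(port);

	main_mask &= ~bits;	/* freeze: stop err/done assertion for port */
	printf("frozen: 0x%05x\n", main_mask);
	main_mask |= bits;	/* thaw: re-enable err/done for port */
	printf("thawed: 0x%05x\n", main_mask);
	return 0;
}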
@@ -2668,19 +2667,17 @@ static int mv_init_host(struct ata_host *host, unsigned int board_idx)
 
 	rc = mv_chip_id(host, board_idx);
 	if (rc)
 		goto done;
 
 	if (HAS_PCI(host)) {
-		hpriv->main_cause_reg_addr = hpriv->base +
-		  HC_MAIN_IRQ_CAUSE_OFS;
-		hpriv->main_mask_reg_addr = hpriv->base + HC_MAIN_IRQ_MASK_OFS;
+		hpriv->main_cause_reg_addr = mmio + HC_MAIN_IRQ_CAUSE_OFS;
+		hpriv->main_mask_reg_addr  = mmio + HC_MAIN_IRQ_MASK_OFS;
 	} else {
-		hpriv->main_cause_reg_addr = hpriv->base +
-		  HC_SOC_MAIN_IRQ_CAUSE_OFS;
-		hpriv->main_mask_reg_addr = hpriv->base +
-		  HC_SOC_MAIN_IRQ_MASK_OFS;
+		hpriv->main_cause_reg_addr = mmio + HC_SOC_MAIN_IRQ_CAUSE_OFS;
+		hpriv->main_mask_reg_addr  = mmio + HC_SOC_MAIN_IRQ_MASK_OFS;
 	}
-	/* global interrupt mask */
+
+	/* global interrupt mask: 0 == mask everything */
 	writel(0, hpriv->main_mask_reg_addr);
 
 	n_hc = mv_get_hc_count(host->ports[0]->flags);