author     Jeff Garzik <jeff@garzik.org>    2006-08-24 03:19:22 -0400
committer  Jeff Garzik <jeff@garzik.org>    2006-08-24 03:19:22 -0400
commit     cca3974e48607c3775dc73b544a5700b2e37c21a (patch)
tree       0777d6121ba199af0aad196eb5a693510ec8e62e /drivers/ata/sata_mv.c
parent     54a86bfc3d4601be9c36cd4e8a1bdc580c98fa6a (diff)
libata: Grand renaming.
The biggest change is that ata_host_set is renamed to ata_host.

* ata_host_set                   => ata_host
* ata_probe_ent->host_flags      => ata_probe_ent->port_flags
* ata_probe_ent->host_set_flags  => ata_probe_ent->_host_flags
* ata_host_stats                 => ata_port_stats
* ata_port->host                 => ata_port->scsi_host
* ata_port->host_set             => ata_port->host
* ata_port_info->host_flags      => ata_port_info->flags
* ata_(.*)host_set(.*)\(\)       => ata_\1host\2()

The leading underscore in ata_probe_ent->_host_flags is there to avoid reusing ->host_flags for a different purpose. Currently the only user of the field is libata-bmdma.c, and probe_ent itself is scheduled to be removed.

ata_port->host is reused for a different purpose, but this field is used inside the libata core proper and is of a different type.

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/ata/sata_mv.c')
-rw-r--r--  drivers/ata/sata_mv.c  98
1 file changed, 48 insertions(+), 50 deletions(-)
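
As a rough illustration of what the renaming means for driver code, here is a minimal userspace sketch with stub types (not the real libata structures; the struct members shown and the 0x100 per-port stride are made up for the example). It only mirrors the mapping above: struct ata_host_set becomes struct ata_host, ap->host_set becomes ap->host, and the old ap->host (the Scsi_Host pointer) becomes ap->scsi_host.

/* Sketch only: stub types mirroring the rename, not kernel code. */
#include <stdio.h>

struct ata_host {                 /* was: struct ata_host_set */
	void *mmio_base;
	void *private_data;
};

struct ata_port {
	struct ata_host *host;    /* was: ap->host_set */
	void *scsi_host;          /* was: ap->host (struct Scsi_Host *) */
	unsigned int port_no;
};

/* After the rename, helpers like mv_ap_base() read ap->host->mmio_base
 * instead of ap->host_set->mmio_base.  The 0x100 stride is illustrative.
 */
static void *port_base_sketch(struct ata_port *ap)
{
	return (char *)ap->host->mmio_base + ap->port_no * 0x100;
}

int main(void)
{
	static char mmio[0x400];
	struct ata_host host = { .mmio_base = mmio };
	struct ata_port port = { .host = &host, .port_no = 2 };

	printf("port 2 base offset: 0x%lx\n",
	       (long)((char *)port_base_sketch(&port) - mmio));
	return 0;
}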
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index a2915a56accd..34f1939b44c9 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -342,7 +342,7 @@ static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in);
 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val);
 static void mv_phy_reset(struct ata_port *ap);
 static void __mv_phy_reset(struct ata_port *ap, int can_sleep);
-static void mv_host_stop(struct ata_host_set *host_set);
+static void mv_host_stop(struct ata_host *host);
 static int mv_port_start(struct ata_port *ap);
 static void mv_port_stop(struct ata_port *ap);
 static void mv_qc_prep(struct ata_queued_cmd *qc);
@@ -480,35 +480,35 @@ static const struct ata_port_operations mv_iie_ops = {
 static const struct ata_port_info mv_port_info[] = {
 	{ /* chip_504x */
 		.sht = &mv_sht,
-		.host_flags = MV_COMMON_FLAGS,
+		.flags = MV_COMMON_FLAGS,
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = 0x7f, /* udma0-6 */
 		.port_ops = &mv5_ops,
 	},
 	{ /* chip_508x */
 		.sht = &mv_sht,
-		.host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+		.flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = 0x7f, /* udma0-6 */
 		.port_ops = &mv5_ops,
 	},
 	{ /* chip_5080 */
 		.sht = &mv_sht,
-		.host_flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
+		.flags = (MV_COMMON_FLAGS | MV_FLAG_DUAL_HC),
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = 0x7f, /* udma0-6 */
 		.port_ops = &mv5_ops,
 	},
 	{ /* chip_604x */
 		.sht = &mv_sht,
-		.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = 0x7f, /* udma0-6 */
 		.port_ops = &mv6_ops,
 	},
 	{ /* chip_608x */
 		.sht = &mv_sht,
-		.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+		.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
 			MV_FLAG_DUAL_HC),
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = 0x7f, /* udma0-6 */
@@ -516,14 +516,14 @@ static const struct ata_port_info mv_port_info[] = {
 	},
 	{ /* chip_6042 */
 		.sht = &mv_sht,
-		.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
+		.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS),
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = 0x7f, /* udma0-6 */
 		.port_ops = &mv_iie_ops,
 	},
 	{ /* chip_7042 */
 		.sht = &mv_sht,
-		.host_flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
+		.flags = (MV_COMMON_FLAGS | MV_6XXX_FLAGS |
 			MV_FLAG_DUAL_HC),
 		.pio_mask = 0x1f, /* pio0-4 */
 		.udma_mask = 0x7f, /* udma0-6 */
@@ -618,12 +618,12 @@ static inline void __iomem *mv_port_base(void __iomem *base, unsigned int port)
 
 static inline void __iomem *mv_ap_base(struct ata_port *ap)
 {
-	return mv_port_base(ap->host_set->mmio_base, ap->port_no);
+	return mv_port_base(ap->host->mmio_base, ap->port_no);
 }
 
-static inline int mv_get_hc_count(unsigned long host_flags)
+static inline int mv_get_hc_count(unsigned long port_flags)
 {
-	return ((host_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
+	return ((port_flags & MV_FLAG_DUAL_HC) ? 2 : 1);
 }
 
 static void mv_irq_clear(struct ata_port *ap)
@@ -809,7 +809,7 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 
 /**
  * mv_host_stop - Host specific cleanup/stop routine.
- * @host_set: host data structure
+ * @host: host data structure
  *
  * Disable ints, cleanup host memory, call general purpose
  * host_stop.
@@ -817,10 +817,10 @@ static void mv_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
  * LOCKING:
  * Inherited from caller.
  */
-static void mv_host_stop(struct ata_host_set *host_set)
+static void mv_host_stop(struct ata_host *host)
 {
-	struct mv_host_priv *hpriv = host_set->private_data;
-	struct pci_dev *pdev = to_pci_dev(host_set->dev);
+	struct mv_host_priv *hpriv = host->private_data;
+	struct pci_dev *pdev = to_pci_dev(host->dev);
 
 	if (hpriv->hp_flags & MV_HP_FLAG_MSI) {
 		pci_disable_msi(pdev);
@@ -828,7 +828,7 @@ static void mv_host_stop(struct ata_host_set *host_set)
 		pci_intx(pdev, 0);
 	}
 	kfree(hpriv);
-	ata_host_stop(host_set);
+	ata_host_stop(host);
 }
 
 static inline void mv_priv_free(struct mv_port_priv *pp, struct device *dev)
@@ -875,8 +875,8 @@ static void mv_edma_cfg(struct mv_host_priv *hpriv, void __iomem *port_mmio)
  */
 static int mv_port_start(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
-	struct mv_host_priv *hpriv = ap->host_set->private_data;
+	struct device *dev = ap->host->dev;
+	struct mv_host_priv *hpriv = ap->host->private_data;
 	struct mv_port_priv *pp;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	void *mem;
@@ -965,17 +965,17 @@ err_out:
  * Stop DMA, cleanup port memory.
  *
  * LOCKING:
- * This routine uses the host_set lock to protect the DMA stop.
+ * This routine uses the host lock to protect the DMA stop.
  */
 static void mv_port_stop(struct ata_port *ap)
 {
-	struct device *dev = ap->host_set->dev;
+	struct device *dev = ap->host->dev;
 	struct mv_port_priv *pp = ap->private_data;
 	unsigned long flags;
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(&ap->host->lock, flags);
 	mv_stop_dma(ap);
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(&ap->host->lock, flags);
 
 	ap->private_data = NULL;
 	ata_pad_free(ap, dev);
@@ -1330,7 +1330,7 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
 
 /**
  * mv_host_intr - Handle all interrupts on the given host controller
- * @host_set: host specific structure
+ * @host: host specific structure
  * @relevant: port error bits relevant to this host controller
  * @hc: which host controller we're to look at
  *
@@ -1344,10 +1344,9 @@ static void mv_err_intr(struct ata_port *ap, int reset_allowed)
  * LOCKING:
  * Inherited from caller.
  */
-static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
-			 unsigned int hc)
+static void mv_host_intr(struct ata_host *host, u32 relevant, unsigned int hc)
 {
-	void __iomem *mmio = host_set->mmio_base;
+	void __iomem *mmio = host->mmio_base;
 	void __iomem *hc_mmio = mv_hc_base(mmio, hc);
 	struct ata_queued_cmd *qc;
 	u32 hc_irq_cause;
@@ -1371,7 +1370,7 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
 
 	for (port = port0; port < port0 + MV_PORTS_PER_HC; port++) {
 		u8 ata_status = 0;
-		struct ata_port *ap = host_set->ports[port];
+		struct ata_port *ap = host->ports[port];
 		struct mv_port_priv *pp = ap->private_data;
 
 		hard_port = mv_hardport_from_port(port); /* range 0..3 */
@@ -1444,15 +1443,15 @@ static void mv_host_intr(struct ata_host_set *host_set, u32 relevant,
  * reported here.
  *
  * LOCKING:
- * This routine holds the host_set lock while processing pending
+ * This routine holds the host lock while processing pending
  * interrupts.
  */
 static irqreturn_t mv_interrupt(int irq, void *dev_instance,
 				struct pt_regs *regs)
 {
-	struct ata_host_set *host_set = dev_instance;
+	struct ata_host *host = dev_instance;
 	unsigned int hc, handled = 0, n_hcs;
-	void __iomem *mmio = host_set->mmio_base;
+	void __iomem *mmio = host->mmio_base;
 	struct mv_host_priv *hpriv;
 	u32 irq_stat;
 
@@ -1465,18 +1464,18 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
 		return IRQ_NONE;
 	}
 
-	n_hcs = mv_get_hc_count(host_set->ports[0]->flags);
-	spin_lock(&host_set->lock);
+	n_hcs = mv_get_hc_count(host->ports[0]->flags);
+	spin_lock(&host->lock);
 
 	for (hc = 0; hc < n_hcs; hc++) {
 		u32 relevant = irq_stat & (HC0_IRQ_PEND << (hc * HC_SHIFT));
 		if (relevant) {
-			mv_host_intr(host_set, relevant, hc);
+			mv_host_intr(host, relevant, hc);
 			handled++;
 		}
 	}
 
-	hpriv = host_set->private_data;
+	hpriv = host->private_data;
 	if (IS_60XX(hpriv)) {
 		/* deal with the interrupt coalescing bits */
 		if (irq_stat & (TRAN_LO_DONE | TRAN_HI_DONE | PORTS_0_7_COAL_DONE)) {
@@ -1491,12 +1490,12 @@ static irqreturn_t mv_interrupt(int irq, void *dev_instance,
 			readl(mmio + PCI_IRQ_CAUSE_OFS));
 
 		DPRINTK("All regs @ PCI error\n");
-		mv_dump_all_regs(mmio, -1, to_pci_dev(host_set->dev));
+		mv_dump_all_regs(mmio, -1, to_pci_dev(host->dev));
 
 		writelfl(0, mmio + PCI_IRQ_CAUSE_OFS);
 		handled++;
 	}
-	spin_unlock(&host_set->lock);
+	spin_unlock(&host->lock);
 
 	return IRQ_RETVAL(handled);
 }
@@ -1528,7 +1527,7 @@ static unsigned int mv5_scr_offset(unsigned int sc_reg_in)
 
 static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
 {
-	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
+	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
 
 	if (ofs != 0xffffffffU)
@@ -1539,7 +1538,7 @@ static u32 mv5_scr_read(struct ata_port *ap, unsigned int sc_reg_in)
 
 static void mv5_scr_write(struct ata_port *ap, unsigned int sc_reg_in, u32 val)
 {
-	void __iomem *mmio = mv5_phy_base(ap->host_set->mmio_base, ap->port_no);
+	void __iomem *mmio = mv5_phy_base(ap->host->mmio_base, ap->port_no);
 	unsigned int ofs = mv5_scr_offset(sc_reg_in);
 
 	if (ofs != 0xffffffffU)
@@ -1904,8 +1903,8 @@ static void mv_channel_reset(struct mv_host_priv *hpriv, void __iomem *mmio,
 
 static void mv_stop_and_reset(struct ata_port *ap)
 {
-	struct mv_host_priv *hpriv = ap->host_set->private_data;
-	void __iomem *mmio = ap->host_set->mmio_base;
+	struct mv_host_priv *hpriv = ap->host->private_data;
+	void __iomem *mmio = ap->host->mmio_base;
 
 	mv_stop_dma(ap);
 
@@ -1936,7 +1935,7 @@ static inline void __msleep(unsigned int msec, int can_sleep)
 static void __mv_phy_reset(struct ata_port *ap, int can_sleep)
 {
 	struct mv_port_priv *pp = ap->private_data;
-	struct mv_host_priv *hpriv = ap->host_set->private_data;
+	struct mv_host_priv *hpriv = ap->host->private_data;
 	void __iomem *port_mmio = mv_ap_base(ap);
 	struct ata_taskfile tf;
 	struct ata_device *dev = &ap->device[0];
@@ -2035,7 +2034,7 @@ static void mv_phy_reset(struct ata_port *ap)
  * chip/bus, fail the command, and move on.
  *
  * LOCKING:
- * This routine holds the host_set lock while failing the command.
+ * This routine holds the host lock while failing the command.
  */
 static void mv_eng_timeout(struct ata_port *ap)
 {
@@ -2044,18 +2043,17 @@ static void mv_eng_timeout(struct ata_port *ap)
 
 	ata_port_printk(ap, KERN_ERR, "Entering mv_eng_timeout\n");
 	DPRINTK("All regs @ start of eng_timeout\n");
-	mv_dump_all_regs(ap->host_set->mmio_base, ap->port_no,
-			 to_pci_dev(ap->host_set->dev));
+	mv_dump_all_regs(ap->host->mmio_base, ap->port_no,
+			 to_pci_dev(ap->host->dev));
 
 	qc = ata_qc_from_tag(ap, ap->active_tag);
 	printk(KERN_ERR "mmio_base %p ap %p qc %p scsi_cmnd %p &cmnd %p\n",
-	       ap->host_set->mmio_base, ap, qc, qc->scsicmd,
-	       &qc->scsicmd->cmnd);
+	       ap->host->mmio_base, ap, qc, qc->scsicmd, &qc->scsicmd->cmnd);
 
-	spin_lock_irqsave(&ap->host_set->lock, flags);
+	spin_lock_irqsave(&ap->host->lock, flags);
 	mv_err_intr(ap, 0);
 	mv_stop_and_reset(ap);
-	spin_unlock_irqrestore(&ap->host_set->lock, flags);
+	spin_unlock_irqrestore(&ap->host->lock, flags);
 
 	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
 	if (qc->flags & ATA_QCFLAG_ACTIVE) {
@@ -2236,7 +2234,7 @@ static int mv_init_host(struct pci_dev *pdev, struct ata_probe_ent *probe_ent,
 	if (rc)
 		goto done;
 
-	n_hc = mv_get_hc_count(probe_ent->host_flags);
+	n_hc = mv_get_hc_count(probe_ent->port_flags);
 	probe_ent->n_ports = MV_PORTS_PER_HC * n_hc;
 
 	for (port = 0; port < probe_ent->n_ports; port++)
@@ -2389,7 +2387,7 @@ static int mv_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 	memset(hpriv, 0, sizeof(*hpriv));
 
 	probe_ent->sht = mv_port_info[board_idx].sht;
-	probe_ent->host_flags = mv_port_info[board_idx].host_flags;
+	probe_ent->port_flags = mv_port_info[board_idx].flags;
 	probe_ent->pio_mask = mv_port_info[board_idx].pio_mask;
 	probe_ent->udma_mask = mv_port_info[board_idx].udma_mask;
 	probe_ent->port_ops = mv_port_info[board_idx].port_ops;