path: root/drivers/net/sungem.c
author	Jeff Garzik <jeff@garzik.org>	2006-09-13 13:24:59 -0400
committer	Jeff Garzik <jeff@garzik.org>	2006-09-13 13:24:59 -0400
commit	6aa20a2235535605db6d6d2bd850298b2fe7f31e (patch)
tree	df0b855043407b831d57f2f2c271f8aab48444f4 /drivers/net/sungem.c
parent	7a291083225af6e22ffaa46b3d91cfc1a1ccaab4 (diff)
drivers/net: Trim trailing whitespace
Signed-off-by: Jeff Garzik <jeff@garzik.org>
Diffstat (limited to 'drivers/net/sungem.c')
-rw-r--r--	drivers/net/sungem.c	84
1 file changed, 42 insertions(+), 42 deletions(-)
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 1a441a8a2add..eb8a47605837 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -2,21 +2,21 @@
 * sungem.c: Sun GEM ethernet driver.
 *
 * Copyright (C) 2000, 2001, 2002, 2003 David S. Miller (davem@redhat.com)
 *
 * Support for Apple GMAC and assorted PHYs, WOL, Power Management
 * (C) 2001,2002,2003 Benjamin Herrenscmidt (benh@kernel.crashing.org)
 * (C) 2004,2005 Benjamin Herrenscmidt, IBM Corp.
 *
 * NAPI and NETPOLL support
 * (C) 2004 by Eric Lemoine (eric.lemoine@gmail.com)
 *
 * TODO:
 *  - Now that the driver was significantly simplified, I need to rework
 *    the locking. I'm sure we don't need _2_ spinlocks, and we probably
 *    can avoid taking most of them for so long period of time (and schedule
 *    instead). The main issues at this point are caused by the netdev layer
 *    though:
 *
 *    gem_change_mtu() and gem_set_multicast() are called with a read_lock()
 *    help by net/core/dev.c, thus they can't schedule. That means they can't
 *    call netif_poll_disable() neither, thus force gem_poll() to keep a spinlock
@@ -113,7 +113,7 @@ static struct pci_device_id gem_pci_tbl[] = {
	/* These models only differ from the original GEM in
	 * that their tx/rx fifos are of a different size and
	 * they only support 10/100 speeds. -DaveM
	 *
	 * Apple's GMAC does support gigabit on machines with
	 * the BCM54xx PHYs. -BenH
	 */
@@ -885,7 +885,7 @@ static int gem_poll(struct net_device *dev, int *budget)
	unsigned long flags;

	/*
	 * NAPI locking nightmare: See comment at head of driver
	 */
	spin_lock_irqsave(&gp->lock, flags);

@@ -905,8 +905,8 @@ static int gem_poll(struct net_device *dev, int *budget)

	spin_unlock_irqrestore(&gp->lock, flags);

	/* Run RX thread. We don't use any locking here,
	 * code willing to do bad things - like cleaning the
	 * rx ring - must call netif_poll_disable(), which
	 * schedule_timeout()'s if polling is already disabled.
	 */
@@ -921,7 +921,7 @@ static int gem_poll(struct net_device *dev, int *budget)
			return 1;

		spin_lock_irqsave(&gp->lock, flags);

		gp->status = readl(gp->regs + GREG_STAT);
	} while (gp->status & GREG_STAT_NAPI);

@@ -946,7 +946,7 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
		return IRQ_HANDLED;

	spin_lock_irqsave(&gp->lock, flags);

	if (netif_rx_schedule_prep(dev)) {
		u32 gem_status = readl(gp->regs + GREG_STAT);

@@ -961,9 +961,9 @@ static irqreturn_t gem_interrupt(int irq, void *dev_id, struct pt_regs *regs)
	}

	spin_unlock_irqrestore(&gp->lock, flags);

	/* If polling was disabled at the time we received that
	 * interrupt, we may return IRQ_HANDLED here while we
	 * should return IRQ_NONE. No big deal...
	 */
	return IRQ_HANDLED;
@@ -1112,7 +1112,7 @@ static int gem_start_xmit(struct sk_buff *skb, struct net_device *dev)
			this_ctrl = ctrl;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_ctrl |= TXDCTRL_EOF;

			txd = &gp->init_block->txd[entry];
			txd->buffer = cpu_to_le64(mapping);
			wmb();
@@ -1178,7 +1178,7 @@ static void gem_reset(struct gem *gp)
static void gem_start_dma(struct gem *gp)
{
	u32 val;

	/* We are ready to rock, turn everything on. */
	val = readl(gp->regs + TXDMA_CFG);
	writel(val | TXDMA_CFG_ENABLE, gp->regs + TXDMA_CFG);
@@ -1246,7 +1246,7 @@ static void gem_begin_auto_negotiation(struct gem *gp, struct ethtool_cmd *ep)
	autoneg = gp->want_autoneg;
	speed = gp->phy_mii.speed;
	duplex = gp->phy_mii.duplex;

	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
@@ -1276,7 +1276,7 @@ start_aneg:
		duplex = DUPLEX_HALF;
	if (speed == 0)
		speed = SPEED_10;

	/* If we are asleep, we don't try to actually setup the PHY, we
	 * just store the settings
	 */
@@ -1345,7 +1345,7 @@ static int gem_set_link_modes(struct gem *gp)
		val |= (MAC_TXCFG_ICS | MAC_TXCFG_ICOLL);
	} else {
		/* MAC_TXCFG_NBO must be zero. */
	}
	writel(val, gp->regs + MAC_TXCFG);

	val = (MAC_XIFCFG_OE | MAC_XIFCFG_LLED);
@@ -1470,7 +1470,7 @@ static void gem_link_timer(unsigned long data)
{
	struct gem *gp = (struct gem *) data;
	int restart_aneg = 0;

	if (gp->asleep)
		return;

@@ -1483,7 +1483,7 @@ static void gem_link_timer(unsigned long data)
	 */
	if (gp->reset_task_pending)
		goto restart;

	if (gp->phy_type == phy_serialink ||
	    gp->phy_type == phy_serdes) {
		u32 val = readl(gp->regs + PCS_MIISTAT);
@@ -1660,7 +1660,7 @@ static void gem_init_phy(struct gem *gp)
	mifcfg = readl(gp->regs + MIF_CFG);
	mifcfg &= ~MIF_CFG_BBMODE;
	writel(mifcfg, gp->regs + MIF_CFG);

	if (gp->pdev->vendor == PCI_VENDOR_ID_APPLE) {
		int i;

@@ -1823,7 +1823,7 @@ static u32 gem_setup_multicast(struct gem *gp)
{
	u32 rxcfg = 0;
	int i;

	if ((gp->dev->flags & IFF_ALLMULTI) ||
	    (gp->dev->mc_count > 256)) {
		for (i=0; i<16; i++)
@@ -1985,7 +1985,7 @@ static void gem_init_pause_thresholds(struct gem *gp)
		cfg = ((2 << 1) & GREG_CFG_TXDMALIM);
		cfg |= ((8 << 6) & GREG_CFG_RXDMALIM);
		writel(cfg, gp->regs + GREG_CFG);
	}
}

static int gem_check_invariants(struct gem *gp)
@@ -2039,7 +2039,7 @@ static int gem_check_invariants(struct gem *gp)
	/* Determine initial PHY interface type guess. MDIO1 is the
	 * external PHY and thus takes precedence over MDIO0.
	 */

	if (mif_cfg & MIF_CFG_MDI1) {
		gp->phy_type = phy_mii_mdio1;
		mif_cfg |= MIF_CFG_PSELECT;
@@ -2141,7 +2141,7 @@ static void gem_stop_phy(struct gem *gp, int wol)

		/* Setup wake-on-lan for MAGIC packet */
		writel(MAC_RXCFG_HFE | MAC_RXCFG_SFCS | MAC_RXCFG_ENAB,
		       gp->regs + MAC_RXCFG);
		writel((e[4] << 8) | e[5], gp->regs + WOL_MATCH0);
		writel((e[2] << 8) | e[3], gp->regs + WOL_MATCH1);
		writel((e[0] << 8) | e[1], gp->regs + WOL_MATCH2);
@@ -2230,7 +2230,7 @@ static int gem_do_start(struct net_device *dev)
		gem_reset(gp);
		gem_clean_rings(gp);
		gem_put_cell(gp);

		spin_unlock(&gp->tx_lock);
		spin_unlock_irqrestore(&gp->lock, flags);

@@ -2343,12 +2343,12 @@ static int gem_close(struct net_device *dev)

	mutex_lock(&gp->pm_mutex);

	gp->opened = 0;
	if (!gp->asleep)
		gem_do_stop(dev, 0);

	mutex_unlock(&gp->pm_mutex);

	return 0;
}

@@ -2366,7 +2366,7 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
	printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
	       dev->name,
	       (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");

	/* Keep the cell enabled during the entire operation */
	spin_lock_irqsave(&gp->lock, flags);
	spin_lock(&gp->tx_lock);
@@ -2486,7 +2486,7 @@ static int gem_resume(struct pci_dev *pdev)
	spin_unlock_irqrestore(&gp->lock, flags);

	netif_poll_enable(dev);

	mutex_unlock(&gp->pm_mutex);

	return 0;
@@ -2533,7 +2533,7 @@ static void gem_set_multicast(struct net_device *dev)
	struct gem *gp = dev->priv;
	u32 rxcfg, rxcfg_new;
	int limit = 10000;


	spin_lock_irq(&gp->lock);
	spin_lock(&gp->tx_lock);
@@ -2549,7 +2549,7 @@ static void gem_set_multicast(struct net_device *dev)
	rxcfg_new |= MAC_RXCFG_SFCS;
#endif
	gp->mac_rx_cfg = rxcfg_new;

	writel(rxcfg & ~MAC_RXCFG_ENAB, gp->regs + MAC_RXCFG);
	while (readl(gp->regs + MAC_RXCFG) & MAC_RXCFG_ENAB) {
		if (!limit--)
@@ -2611,12 +2611,12 @@ static int gem_change_mtu(struct net_device *dev, int new_mtu)
static void gem_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct gem *gp = dev->priv;

	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->bus_info, pci_name(gp->pdev));
}

static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct gem *gp = dev->priv;
@@ -2638,7 +2638,7 @@ static int gem_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
		spin_lock_irq(&gp->lock);
		cmd->autoneg = gp->want_autoneg;
		cmd->speed = gp->phy_mii.speed;
		cmd->duplex = gp->phy_mii.duplex;
		cmd->advertising = gp->phy_mii.advertising;

		/* If we started with a forced mode, we don't have a default
@@ -2683,7 +2683,7 @@ static int gem_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
	    (cmd->duplex != DUPLEX_HALF &&
	     cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Apply settings and restart link process. */
	spin_lock_irq(&gp->lock);
	gem_get_cell(gp);
@@ -2716,7 +2716,7 @@ static u32 gem_get_msglevel(struct net_device *dev)
	struct gem *gp = dev->priv;
	return gp->msg_enable;
}

static void gem_set_msglevel(struct net_device *dev, u32 value)
{
	struct gem *gp = dev->priv;
@@ -2776,7 +2776,7 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
	 * with power management.
	 */
	mutex_lock(&gp->pm_mutex);

	spin_lock_irqsave(&gp->lock, flags);
	gem_get_cell(gp);
	spin_unlock_irqrestore(&gp->lock, flags);
@@ -2808,13 +2808,13 @@ static int gem_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
		}
		break;
	};

	spin_lock_irqsave(&gp->lock, flags);
	gem_put_cell(gp);
	spin_unlock_irqrestore(&gp->lock, flags);

	mutex_unlock(&gp->pm_mutex);

	return rc;
}

@@ -3000,7 +3000,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
		}
		pci_using_dac = 0;
	}

	gemreg_base = pci_resource_start(pdev, 0);
	gemreg_len = pci_resource_len(pdev, 0);

@@ -3044,7 +3044,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
	gp->link_timer.data = (unsigned long) gp;

	INIT_WORK(&gp->reset_task, gem_reset_task, gp);

	gp->lstate = link_down;
	gp->timer_ticks = 0;
	netif_carrier_off(dev);
@@ -3153,7 +3153,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,

	if (gp->phy_type == phy_mii_mdio0 ||
	    gp->phy_type == phy_mii_mdio1)
		printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
		       gp->phy_mii.def ? gp->phy_mii.def->name : "no");

	/* GEM can do it all... */