author	Joe Perches <joe@perches.com>	2010-08-17 03:55:04 -0400
committer	David S. Miller <davem@davemloft.net>	2010-08-19 03:04:33 -0400
commit	c6c759884b05c7449a19d39c763a7482180e2def
tree	d1b30a1928143cad6d6e5f651ccaa2501979f009 /drivers/net/sungem.c
parent	0b29b894b17747f7fa0c5668281c68b8cde7647b
drivers/net/sungem: Use netdev_<level>, netif_<level> and pr_<level>
Use the current logging message styles.
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
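The conversion pattern, for reference: pr_fmt() is defined ahead of the includes so that pr_<level>() calls are automatically prefixed with the module name (KBUILD_MODNAME), and messages tied to a specific interface move to netdev_<level>(), which prints the driver and interface name itself, replacing the hand-rolled PFX and "%s: ", dev->name prefixes. A minimal sketch, reusing two messages from the diff below; the wrapper function is illustrative and not part of the patch:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt	/* must come before the #includes */

#include <linux/kernel.h>
#include <linux/netdevice.h>

/* Illustrative only -- shows the two call styles the patch converts to. */
static void gem_log_examples(struct net_device *dev)
{
	/* was: printk(KERN_ERR PFX "Cannot enable MMIO operation, aborting.\n"); */
	pr_err("Cannot enable MMIO operation, aborting\n");

	/* was: printk(KERN_ERR "%s: TX MAC xmit underrun.\n", dev->name); */
	netdev_err(dev, "TX MAC xmit underrun\n");
}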
Diffstat (limited to 'drivers/net/sungem.c')
-rw-r--r--	drivers/net/sungem.c	211
1 file changed, 83 insertions(+), 128 deletions(-)
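Two further helpers appear throughout the diff. netif_<level>(priv, type, dev, ...) folds the old "if (netif_msg_<type>(gp)) printk(...)" idiom into a single call that logs only when the matching NETIF_MSG_* bit is set in the driver's msg_enable mask, and pr_cont() replaces bare printk() calls that continue a line started without a trailing newline. A hedged sketch; gem_like and gem_link_report() are stand-ins for the driver's private state, not code from the patch:

#include <linux/netdevice.h>

/* Illustrative stand-in for the driver's private struct; the netif_<level>()
 * macros only need a msg_enable bitmask to test against NETIF_MSG_*.
 */
struct gem_like {
	u32 msg_enable;
	struct net_device *dev;
};

static void gem_link_report(struct gem_like *gp, int speed, bool full_duplex)
{
	/* was: if (netif_msg_link(gp))
	 *              printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
	 *                     gp->dev->name, speed, full_duplex ? "full" : "half");
	 */
	netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
		   speed, full_duplex ? "full" : "half");

	/* Continuations: open the line with netdev_err(), append with pr_cont(). */
	netdev_err(gp->dev, "PCI error [%04x]", 0x1234);	/* value is a placeholder */
	pr_cont(" <other>");
	pr_cont("\n");
}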
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 434f9d735333..4ceb3cf6a9a9 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -31,6 +31,8 @@
  * about when we can start taking interrupts or get xmit() called...
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
@@ -105,7 +107,6 @@ MODULE_DESCRIPTION("Sun GEM Gbit ethernet driver");
 MODULE_LICENSE("GPL");
 
 #define GEM_MODULE_NAME "gem"
-#define PFX GEM_MODULE_NAME ": "
 
 static DEFINE_PCI_DEVICE_TABLE(gem_pci_tbl) = {
 	{ PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_GEM,
@@ -262,8 +263,7 @@ static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
 			gp->dev->name, pcs_istat);
 
 	if (!(pcs_istat & PCS_ISTAT_LSC)) {
-		printk(KERN_ERR "%s: PCS irq but no link status change???\n",
-			dev->name);
+		netdev_err(dev, "PCS irq but no link status change???\n");
 		return 0;
 	}
 
@@ -282,20 +282,16 @@ static int gem_pcs_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
 		 * when autoneg has completed.
 		 */
 		if (pcs_miistat & PCS_MIISTAT_RF)
-			printk(KERN_INFO "%s: PCS AutoNEG complete, "
-				"RemoteFault\n", dev->name);
+			netdev_info(dev, "PCS AutoNEG complete, RemoteFault\n");
 		else
-			printk(KERN_INFO "%s: PCS AutoNEG complete.\n",
-				dev->name);
+			netdev_info(dev, "PCS AutoNEG complete\n");
 	}
 
 	if (pcs_miistat & PCS_MIISTAT_LS) {
-		printk(KERN_INFO "%s: PCS link is now up.\n",
-			dev->name);
+		netdev_info(dev, "PCS link is now up\n");
 		netif_carrier_on(gp->dev);
 	} else {
-		printk(KERN_INFO "%s: PCS link is now down.\n",
-			dev->name);
+		netdev_info(dev, "PCS link is now down\n");
 		netif_carrier_off(gp->dev);
 		/* If this happens and the link timer is not running,
 		 * reset so we re-negotiate.
@@ -323,14 +319,12 @@ static int gem_txmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
 		return 0;
 
 	if (txmac_stat & MAC_TXSTAT_URUN) {
-		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
-			dev->name);
+		netdev_err(dev, "TX MAC xmit underrun\n");
 		gp->net_stats.tx_fifo_errors++;
 	}
 
 	if (txmac_stat & MAC_TXSTAT_MPE) {
-		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
-			dev->name);
+		netdev_err(dev, "TX MAC max packet size error\n");
 		gp->net_stats.tx_errors++;
 	}
 
@@ -377,8 +371,7 @@ static int gem_rxmac_reset(struct gem *gp)
 		udelay(10);
 	}
 	if (limit == 5000) {
-		printk(KERN_ERR "%s: RX MAC will not reset, resetting whole "
-			"chip.\n", dev->name);
+		netdev_err(dev, "RX MAC will not reset, resetting whole chip\n");
 		return 1;
 	}
 
@@ -390,8 +383,7 @@ static int gem_rxmac_reset(struct gem *gp)
 		udelay(10);
 	}
 	if (limit == 5000) {
-		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
-			"chip.\n", dev->name);
+		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
 		return 1;
 	}
 
@@ -403,8 +395,7 @@ static int gem_rxmac_reset(struct gem *gp)
 		udelay(10);
 	}
 	if (limit == 5000) {
-		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
-			"chip.\n", dev->name);
+		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
 		return 1;
 	}
 
@@ -419,8 +410,7 @@ static int gem_rxmac_reset(struct gem *gp)
 		udelay(10);
 	}
 	if (limit == 5000) {
-		printk(KERN_ERR "%s: RX reset command will not execute, resetting "
-			"whole chip.\n", dev->name);
+		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
 		return 1;
 	}
 
@@ -429,8 +419,7 @@ static int gem_rxmac_reset(struct gem *gp)
 		struct gem_rxd *rxd = &gp->init_block->rxd[i];
 
 		if (gp->rx_skbs[i] == NULL) {
-			printk(KERN_ERR "%s: Parts of RX ring empty, resetting "
-				"whole chip.\n", dev->name);
+			netdev_err(dev, "Parts of RX ring empty, resetting whole chip\n");
 			return 1;
 		}
 
@@ -479,8 +468,7 @@ static int gem_rxmac_interrupt(struct net_device *dev, struct gem *gp, u32 gem_s
 	if (rxmac_stat & MAC_RXSTAT_OFLW) {
 		u32 smac = readl(gp->regs + MAC_SMACHINE);
 
-		printk(KERN_ERR "%s: RX MAC fifo overflow smac[%08x].\n",
-				dev->name, smac);
+		netdev_err(dev, "RX MAC fifo overflow smac[%08x]\n", smac);
 		gp->net_stats.rx_over_errors++;
 		gp->net_stats.rx_fifo_errors++;
 
@@ -542,19 +530,18 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
 
 	if (gp->pdev->vendor == PCI_VENDOR_ID_SUN &&
 	    gp->pdev->device == PCI_DEVICE_ID_SUN_GEM) {
-		printk(KERN_ERR "%s: PCI error [%04x] ",
-			dev->name, pci_estat);
+		netdev_err(dev, "PCI error [%04x]", pci_estat);
 
 		if (pci_estat & GREG_PCIESTAT_BADACK)
-			printk("<No ACK64# during ABS64 cycle> ");
+			pr_cont(" <No ACK64# during ABS64 cycle>");
 		if (pci_estat & GREG_PCIESTAT_DTRTO)
-			printk("<Delayed transaction timeout> ");
+			pr_cont(" <Delayed transaction timeout>");
 		if (pci_estat & GREG_PCIESTAT_OTHER)
-			printk("<other>");
-		printk("\n");
+			pr_cont(" <other>");
+		pr_cont("\n");
 	} else {
 		pci_estat |= GREG_PCIESTAT_OTHER;
-		printk(KERN_ERR "%s: PCI error\n", dev->name);
+		netdev_err(dev, "PCI error\n");
 	}
 
 	if (pci_estat & GREG_PCIESTAT_OTHER) {
@@ -565,26 +552,20 @@ static int gem_pci_interrupt(struct net_device *dev, struct gem *gp, u32 gem_sta
 		 */
 		pci_read_config_word(gp->pdev, PCI_STATUS,
 				     &pci_cfg_stat);
-		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
-			dev->name, pci_cfg_stat);
+		netdev_err(dev, "Read PCI cfg space status [%04x]\n",
+			   pci_cfg_stat);
 		if (pci_cfg_stat & PCI_STATUS_PARITY)
-			printk(KERN_ERR "%s: PCI parity error detected.\n",
-				dev->name);
+			netdev_err(dev, "PCI parity error detected\n");
 		if (pci_cfg_stat & PCI_STATUS_SIG_TARGET_ABORT)
-			printk(KERN_ERR "%s: PCI target abort.\n",
-				dev->name);
+			netdev_err(dev, "PCI target abort\n");
 		if (pci_cfg_stat & PCI_STATUS_REC_TARGET_ABORT)
-			printk(KERN_ERR "%s: PCI master acks target abort.\n",
-				dev->name);
+			netdev_err(dev, "PCI master acks target abort\n");
 		if (pci_cfg_stat & PCI_STATUS_REC_MASTER_ABORT)
-			printk(KERN_ERR "%s: PCI master abort.\n",
-				dev->name);
+			netdev_err(dev, "PCI master abort\n");
 		if (pci_cfg_stat & PCI_STATUS_SIG_SYSTEM_ERROR)
-			printk(KERN_ERR "%s: PCI system error SERR#.\n",
-				dev->name);
+			netdev_err(dev, "PCI system error SERR#\n");
 		if (pci_cfg_stat & PCI_STATUS_DETECTED_PARITY)
-			printk(KERN_ERR "%s: PCI parity error.\n",
-				dev->name);
+			netdev_err(dev, "PCI parity error\n");
 
 		/* Write the error bits back to clear them. */
 		pci_cfg_stat &= (PCI_STATUS_PARITY |
@@ -874,8 +855,7 @@ static int gem_rx(struct gem *gp, int work_to_do)
 	gp->rx_new = entry;
 
 	if (drops)
-		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
-			gp->dev->name);
+		netdev_info(gp->dev, "Memory squeeze, deferring packet\n");
 
 	return work_done;
 }
@@ -981,21 +961,19 @@ static void gem_tx_timeout(struct net_device *dev)
 {
 	struct gem *gp = netdev_priv(dev);
 
-	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+	netdev_err(dev, "transmit timed out, resetting\n");
 	if (!gp->running) {
-		printk("%s: hrm.. hw not running !\n", dev->name);
+		netdev_err(dev, "hrm.. hw not running !\n");
 		return;
 	}
-	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x]\n",
-	       dev->name,
-	       readl(gp->regs + TXDMA_CFG),
-	       readl(gp->regs + MAC_TXSTAT),
-	       readl(gp->regs + MAC_TXCFG));
-	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
-	       dev->name,
-	       readl(gp->regs + RXDMA_CFG),
-	       readl(gp->regs + MAC_RXSTAT),
-	       readl(gp->regs + MAC_RXCFG));
+	netdev_err(dev, "TX_STATE[%08x:%08x:%08x]\n",
+		   readl(gp->regs + TXDMA_CFG),
+		   readl(gp->regs + MAC_TXSTAT),
+		   readl(gp->regs + MAC_TXCFG));
+	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+		   readl(gp->regs + RXDMA_CFG),
+		   readl(gp->regs + MAC_RXSTAT),
+		   readl(gp->regs + MAC_RXCFG));
 
 	spin_lock_irq(&gp->lock);
 	spin_lock(&gp->tx_lock);
@@ -1048,8 +1026,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
 	if (TX_BUFFS_AVAIL(gp) <= (skb_shinfo(skb)->nr_frags + 1)) {
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&gp->tx_lock, flags);
-		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
-		       dev->name);
+		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 		return NETDEV_TX_BUSY;
 	}
 
@@ -1158,8 +1135,7 @@ static void gem_pcs_reset(struct gem *gp)
 			break;
 	}
 	if (limit < 0)
-		printk(KERN_WARNING "%s: PCS reset bit would not clear.\n",
-			gp->dev->name);
+		netdev_warn(gp->dev, "PCS reset bit would not clear\n");
 }
 
 static void gem_pcs_reinit_adv(struct gem *gp)
@@ -1230,7 +1206,7 @@ static void gem_reset(struct gem *gp)
 	} while (val & (GREG_SWRST_TXRST | GREG_SWRST_RXRST));
 
 	if (limit < 0)
-		printk(KERN_ERR "%s: SW reset is ghetto.\n", gp->dev->name);
+		netdev_err(gp->dev, "SW reset is ghetto\n");
 
 	if (gp->phy_type == phy_serialink || gp->phy_type == phy_serdes)
 		gem_pcs_reinit_adv(gp);
@@ -1395,9 +1371,8 @@ static int gem_set_link_modes(struct gem *gp)
 		speed = SPEED_1000;
 	}
 
-	if (netif_msg_link(gp))
-		printk(KERN_INFO "%s: Link is up at %d Mbps, %s-duplex.\n",
-			gp->dev->name, speed, (full_duplex ? "full" : "half"));
+	netif_info(gp, link, gp->dev, "Link is up at %d Mbps, %s-duplex\n",
+		   speed, (full_duplex ? "full" : "half"));
 
 	if (!gp->running)
 		return 0;
@@ -1451,15 +1426,13 @@ static int gem_set_link_modes(struct gem *gp)
 
 	if (netif_msg_link(gp)) {
 		if (pause) {
-			printk(KERN_INFO "%s: Pause is enabled "
-			       "(rxfifo: %d off: %d on: %d)\n",
-			       gp->dev->name,
-			       gp->rx_fifo_sz,
-			       gp->rx_pause_off,
-			       gp->rx_pause_on);
+			netdev_info(gp->dev,
+				    "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+				    gp->rx_fifo_sz,
+				    gp->rx_pause_off,
+				    gp->rx_pause_on);
 		} else {
-			printk(KERN_INFO "%s: Pause is disabled\n",
-			       gp->dev->name);
+			netdev_info(gp->dev, "Pause is disabled\n");
 		}
 	}
 
@@ -1484,9 +1457,8 @@ static int gem_mdio_link_not_up(struct gem *gp)
 {
 	switch (gp->lstate) {
 	case link_force_ret:
-		if (netif_msg_link(gp))
-			printk(KERN_INFO "%s: Autoneg failed again, keeping"
-				" forced mode\n", gp->dev->name);
+		netif_info(gp, link, gp->dev,
+			   "Autoneg failed again, keeping forced mode\n");
 		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii,
 			gp->last_forced_speed, DUPLEX_HALF);
 		gp->timer_ticks = 5;
@@ -1499,9 +1471,7 @@ static int gem_mdio_link_not_up(struct gem *gp)
 		 */
 		if (gp->phy_mii.def->magic_aneg)
 			return 1;
-		if (netif_msg_link(gp))
-			printk(KERN_INFO "%s: switching to forced 100bt\n",
-				gp->dev->name);
+		netif_info(gp, link, gp->dev, "switching to forced 100bt\n");
 		/* Try forced modes. */
 		gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_100,
 			DUPLEX_HALF);
@@ -1517,9 +1487,8 @@ static int gem_mdio_link_not_up(struct gem *gp)
 			gp->phy_mii.def->ops->setup_forced(&gp->phy_mii, SPEED_10,
 				DUPLEX_HALF);
 			gp->timer_ticks = 5;
-			if (netif_msg_link(gp))
-				printk(KERN_INFO "%s: switching to forced 10bt\n",
-					gp->dev->name);
+			netif_info(gp, link, gp->dev,
+				   "switching to forced 10bt\n");
 			return 0;
 		} else
 			return 1;
@@ -1574,8 +1543,8 @@ static void gem_link_timer(unsigned long data)
 			gp->last_forced_speed = gp->phy_mii.speed;
 			gp->timer_ticks = 5;
 			if (netif_msg_link(gp))
-				printk(KERN_INFO "%s: Got link after fallback, retrying"
-					" autoneg once...\n", gp->dev->name);
+				netdev_info(gp->dev,
+					    "Got link after fallback, retrying autoneg once...\n");
 			gp->phy_mii.def->ops->setup_aneg(&gp->phy_mii, gp->phy_mii.advertising);
 		} else if (gp->lstate != link_up) {
 			gp->lstate = link_up;
@@ -1589,9 +1558,7 @@ static void gem_link_timer(unsigned long data)
 		 */
 		if (gp->lstate == link_up) {
 			gp->lstate = link_down;
-			if (netif_msg_link(gp))
-				printk(KERN_INFO "%s: Link down\n",
-					gp->dev->name);
+			netif_info(gp, link, gp->dev, "Link down\n");
 			netif_carrier_off(gp->dev);
 			gp->reset_task_pending = 1;
 			schedule_work(&gp->reset_task);
@@ -1746,8 +1713,7 @@ static void gem_init_phy(struct gem *gp)
 			if (phy_read(gp, MII_BMCR) != 0xffff)
 				break;
 			if (i == 2)
-				printk(KERN_WARNING "%s: GMAC PHY not responding !\n",
-				       gp->dev->name);
+				netdev_warn(gp->dev, "GMAC PHY not responding !\n");
 		}
 	}
 
@@ -2038,7 +2004,7 @@ static int gem_check_invariants(struct gem *gp)
 		 * as this chip has no gigabit PHY.
 		 */
 		if ((mif_cfg & (MIF_CFG_MDI0 | MIF_CFG_MDI1)) == 0) {
-			printk(KERN_ERR PFX "RIO GEM lacks MII phy, mif_cfg[%08x]\n",
-			       mif_cfg);
+			pr_err("RIO GEM lacks MII phy, mif_cfg[%08x]\n",
+			       mif_cfg);
 			return -1;
 		}
@@ -2078,7 +2044,7 @@ static int gem_check_invariants(struct gem *gp)
 		}
 		if (i == 32) {
 			if (pdev->device != PCI_DEVICE_ID_SUN_GEM) {
-				printk(KERN_ERR PFX "RIO MII phy will not respond.\n");
+				pr_err("RIO MII phy will not respond\n");
 				return -1;
 			}
 			gp->phy_type = phy_serdes;
@@ -2093,7 +2059,7 @@ static int gem_check_invariants(struct gem *gp)
 	if (pdev->device == PCI_DEVICE_ID_SUN_GEM) {
 		if (gp->tx_fifo_sz != (9 * 1024) ||
 		    gp->rx_fifo_sz != (20 * 1024)) {
-			printk(KERN_ERR PFX "GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+			pr_err("GEM has bogus fifo sizes tx(%d) rx(%d)\n",
 			       gp->tx_fifo_sz, gp->rx_fifo_sz);
 			return -1;
 		}
@@ -2101,7 +2067,7 @@ static int gem_check_invariants(struct gem *gp)
 	} else {
 		if (gp->tx_fifo_sz != (2 * 1024) ||
 		    gp->rx_fifo_sz != (2 * 1024)) {
-			printk(KERN_ERR PFX "RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
+			pr_err("RIO GEM has bogus fifo sizes tx(%d) rx(%d)\n",
 			       gp->tx_fifo_sz, gp->rx_fifo_sz);
 			return -1;
 		}
@@ -2239,7 +2205,7 @@ static int gem_do_start(struct net_device *dev)
 
 	if (request_irq(gp->pdev->irq, gem_interrupt,
 			IRQF_SHARED, dev->name, (void *)dev)) {
-		printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
+		netdev_err(dev, "failed to request irq !\n");
 
 		spin_lock_irqsave(&gp->lock, flags);
 		spin_lock(&gp->tx_lock);
@@ -2378,9 +2344,8 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
 
 	mutex_lock(&gp->pm_mutex);
 
-	printk(KERN_INFO "%s: suspending, WakeOnLan %s\n",
-	       dev->name,
-	       (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
+	netdev_info(dev, "suspending, WakeOnLan %s\n",
+		    (gp->wake_on_lan && gp->opened) ? "enabled" : "disabled");
 
 	/* Keep the cell enabled during the entire operation */
 	spin_lock_irqsave(&gp->lock, flags);
@@ -2440,7 +2405,7 @@ static int gem_resume(struct pci_dev *pdev)
 	struct gem *gp = netdev_priv(dev);
 	unsigned long flags;
 
-	printk(KERN_INFO "%s: resuming\n", dev->name);
+	netdev_info(dev, "resuming\n");
 
 	mutex_lock(&gp->pm_mutex);
 
@@ -2452,8 +2417,7 @@ static int gem_resume(struct pci_dev *pdev)
 
 	/* Make sure PCI access and bus master are enabled */
 	if (pci_enable_device(gp->pdev)) {
-		printk(KERN_ERR "%s: Can't re-enable chip !\n",
-		       dev->name);
+		netdev_err(dev, "Can't re-enable chip !\n");
 		/* Put cell and forget it for now, it will be considered as
 		 * still asleep, a new sleep cycle may bring it back
 		 */
@@ -2938,7 +2902,7 @@ static int __devinit gem_get_device_address(struct gem *gp)
 		addr = idprom->id_ethaddr;
 #else
 		printk("\n");
-		printk(KERN_ERR "%s: can't get mac-address\n", dev->name);
+		pr_err("%s: can't get mac-address\n", dev->name);
 		return -1;
 #endif
 	}
@@ -3009,14 +2973,12 @@ static const struct net_device_ops gem_netdev_ops = {
 static int __devinit gem_init_one(struct pci_dev *pdev,
 				  const struct pci_device_id *ent)
 {
-	static int gem_version_printed = 0;
 	unsigned long gemreg_base, gemreg_len;
 	struct net_device *dev;
 	struct gem *gp;
 	int err, pci_using_dac;
 
-	if (gem_version_printed++ == 0)
-		printk(KERN_INFO "%s", version);
+	printk_once(KERN_INFO "%s", version);
 
 	/* Apple gmac note: during probe, the chip is powered up by
 	 * the arch code to allow the code below to work (and to let
@@ -3026,8 +2988,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 	 */
 	err = pci_enable_device(pdev);
 	if (err) {
-		printk(KERN_ERR PFX "Cannot enable MMIO operation, "
-		       "aborting.\n");
+		pr_err("Cannot enable MMIO operation, aborting\n");
 		return err;
 	}
 	pci_set_master(pdev);
@@ -3048,8 +3009,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 	} else {
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
-			printk(KERN_ERR PFX "No usable DMA configuration, "
-			       "aborting.\n");
+			pr_err("No usable DMA configuration, aborting\n");
 			goto err_disable_device;
 		}
 		pci_using_dac = 0;
@@ -3059,15 +3019,14 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 	gemreg_len = pci_resource_len(pdev, 0);
 
 	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
-		printk(KERN_ERR PFX "Cannot find proper PCI device "
-		       "base address, aborting.\n");
+		pr_err("Cannot find proper PCI device base address, aborting\n");
 		err = -ENODEV;
 		goto err_disable_device;
 	}
 
 	dev = alloc_etherdev(sizeof(*gp));
 	if (!dev) {
-		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
+		pr_err("Etherdev alloc failed, aborting\n");
 		err = -ENOMEM;
 		goto err_disable_device;
 	}
@@ -3077,8 +3036,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 
 	err = pci_request_regions(pdev, DRV_NAME);
 	if (err) {
-		printk(KERN_ERR PFX "Cannot obtain PCI resources, "
-		       "aborting.\n");
+		pr_err("Cannot obtain PCI resources, aborting\n");
 		goto err_out_free_netdev;
 	}
 
@@ -3104,8 +3062,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 
 	gp->regs = ioremap(gemreg_base, gemreg_len);
 	if (!gp->regs) {
-		printk(KERN_ERR PFX "Cannot map device registers, "
-		       "aborting.\n");
+		pr_err("Cannot map device registers, aborting\n");
 		err = -EIO;
 		goto err_out_free_res;
 	}
@@ -3150,8 +3107,7 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 		pci_alloc_consistent(pdev, sizeof(struct gem_init_block),
 				     &gp->gblock_dvma);
 	if (!gp->init_block) {
-		printk(KERN_ERR PFX "Cannot allocate init block, "
-		       "aborting.\n");
+		pr_err("Cannot allocate init block, aborting\n");
 		err = -ENOMEM;
 		goto err_out_iounmap;
 	}
@@ -3180,19 +3136,18 @@ static int __devinit gem_init_one(struct pci_dev *pdev,
 
 	/* Register with kernel */
 	if (register_netdev(dev)) {
-		printk(KERN_ERR PFX "Cannot register net device, "
-		       "aborting.\n");
+		pr_err("Cannot register net device, aborting\n");
 		err = -ENOMEM;
 		goto err_out_free_consistent;
 	}
 
-	printk(KERN_INFO "%s: Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
-	       dev->name, dev->dev_addr);
+	netdev_info(dev, "Sun GEM (PCI) 10/100/1000BaseT Ethernet %pM\n",
+		    dev->dev_addr);
 
 	if (gp->phy_type == phy_mii_mdio0 ||
 	    gp->phy_type == phy_mii_mdio1)
-		printk(KERN_INFO "%s: Found %s PHY\n", dev->name,
-		       gp->phy_mii.def ? gp->phy_mii.def->name : "no");
+		netdev_info(dev, "Found %s PHY\n",
+			    gp->phy_mii.def ? gp->phy_mii.def->name : "no");
 
 	/* GEM can do it all... */
 	dev->features |= NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_LLTX;