author		Joe Perches <joe@perches.com>	2010-02-17 10:01:53 -0500
committer	David S. Miller <davem@davemloft.net>	2010-02-17 20:45:20 -0500
commit		436d27d1db147713e4cd17b2fa491d22374bda98 (patch)
tree		2d4a0deb80070b3cee47a5626f883223dd684e26 /drivers/net/cassini.c
parent		7995c64e5b56ec7fe6032e5fc586f726cde2152b (diff)
drivers/net/cassini.c: Use (pr|netdev|netif)_<level> macro helpers
Add #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
Remove #define PFX
Use pr_<level>
Use netdev_<level>
Use netif_<level>
Remove periods from formats
Coalesce long formats

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
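For readers who have not met these helpers before, the sketch below shows roughly what the conversion relies on. It is illustrative only, not the kernel's actual macro definitions (those live in the kernel's printk and netdevice headers): pr_fmt() supplies the "cassini: " prefix that the removed PFX macro used to provide, and netif_<level>() folds the netif_msg_*() test plus the "%s", dev->name pair into a single call.

/*
 * Illustrative sketch only -- not the kernel's real definitions.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

/* pr_err("bogus length %zu in \"%s\"\n", fw->size, fw_name) expands
 * roughly to printk(KERN_ERR KBUILD_MODNAME ": " "bogus length ..."),
 * which is why the hand-written "cassini: " prefixes can be dropped. */
#define pr_err(fmt, ...) printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)

/* netif_err(cp, rx_err, cp->dev, "no spare buffers available\n")
 * logs only when the rx_err bit is set in cp->msg_enable, and
 * netdev_err() prefixes the device name, so the explicit
 * netif_msg_rx_err() check and the "%s", cp->dev->name argument
 * disappear from every call site. */
#define netif_err(priv, type, dev, fmt, ...)			\
	do {							\
		if (netif_msg_##type(priv))			\
			netdev_err(dev, fmt, ##__VA_ARGS__);	\
	} while (0)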
Diffstat (limited to 'drivers/net/cassini.c')
-rw-r--r--	drivers/net/cassini.c	361
1 file changed, 151 insertions(+), 210 deletions(-)
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index f90cac43130d..7cbcfb0ade1c 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -66,6 +66,7 @@
  * by default, the selective clear mask is set up to process rx packets.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 
 #include <linux/module.h>
 #include <linux/kernel.h>
@@ -143,7 +144,6 @@
 #undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */
 
 #define DRV_MODULE_NAME "cassini"
-#define PFX DRV_MODULE_NAME ": "
 #define DRV_MODULE_VERSION "1.6"
 #define DRV_MODULE_RELDATE "21 May 2008"
 
@@ -649,9 +649,8 @@ static cas_page_t *cas_page_dequeue(struct cas *cp)
 		cas_spare_recover(cp, GFP_ATOMIC);
 		spin_lock(&cp->rx_spare_lock);
 		if (list_empty(&cp->rx_spare_list)) {
-			if (netif_msg_rx_err(cp))
-				printk(KERN_ERR "%s: no spare buffers "
-				       "available.\n", cp->dev->name);
+			netif_err(cp, rx_err, cp->dev,
+				  "no spare buffers available\n");
 			spin_unlock(&cp->rx_spare_lock);
 			return NULL;
 		}
@@ -728,12 +727,10 @@ static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
 #endif
 start_aneg:
 	if (cp->lstate == link_up) {
-		printk(KERN_INFO "%s: PCS link down.\n",
-		       cp->dev->name);
+		netdev_info(cp->dev, "PCS link down\n");
 	} else {
 		if (changed) {
-			printk(KERN_INFO "%s: link configuration changed\n",
-			       cp->dev->name);
+			netdev_info(cp->dev, "link configuration changed\n");
 		}
 	}
 	cp->lstate = link_down;
@@ -826,12 +823,12 @@ static int cas_saturn_firmware_init(struct cas *cp)
 
 	err = request_firmware(&fw, fw_name, &cp->pdev->dev);
 	if (err) {
-		printk(KERN_ERR "cassini: Failed to load firmware \"%s\"\n",
-		       fw_name);
+		pr_err("Failed to load firmware \"%s\"\n",
+		       fw_name);
 		return err;
 	}
 	if (fw->size < 2) {
-		printk(KERN_ERR "cassini: bogus length %zu in \"%s\"\n",
-		       fw->size, fw_name);
+		pr_err("bogus length %zu in \"%s\"\n",
+		       fw->size, fw_name);
 		err = -EINVAL;
 		goto out;
@@ -841,7 +838,7 @@ static int cas_saturn_firmware_init(struct cas *cp)
 	cp->fw_data = vmalloc(cp->fw_size);
 	if (!cp->fw_data) {
 		err = -ENOMEM;
-		printk(KERN_ERR "cassini: \"%s\" Failed %d\n", fw_name, err);
+		pr_err("\"%s\" Failed %d\n", fw_name, err);
 		goto out;
 	}
 	memcpy(cp->fw_data, &fw->data[2], cp->fw_size);
@@ -986,9 +983,8 @@ static void cas_phy_init(struct cas *cp)
 				break;
 		}
 		if (limit <= 0)
-			printk(KERN_WARNING "%s: PCS reset bit would not "
-			       "clear [%08x].\n", cp->dev->name,
-			       readl(cp->regs + REG_PCS_STATE_MACHINE));
+			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
+				    readl(cp->regs + REG_PCS_STATE_MACHINE));
 
 		/* Make sure PCS is disabled while changing advertisement
 		 * configuration.
@@ -1030,11 +1026,8 @@ static int cas_pcs_link_check(struct cas *cp)
 	 */
 	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
 		     PCS_MII_STATUS_REMOTE_FAULT)) ==
-	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT)) {
-		if (netif_msg_link(cp))
-			printk(KERN_INFO "%s: PCS RemoteFault\n",
-			       cp->dev->name);
-	}
+	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
+		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");
 
 	/* work around link detection issue by querying the PCS state
 	 * machine directly.
@@ -1081,10 +1074,8 @@ static int cas_pcs_link_check(struct cas *cp)
 			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
 		}
 		netif_carrier_off(cp->dev);
-		if (cp->opened && netif_msg_link(cp)) {
-			printk(KERN_INFO "%s: PCS link down.\n",
-			       cp->dev->name);
-		}
+		if (cp->opened)
+			netif_info(cp, link, cp->dev, "PCS link down\n");
 
 		/* Cassini only: if you force a mode, there can be
 		 * sync problems on link down. to fix that, the following
@@ -1139,9 +1130,8 @@ static int cas_txmac_interrupt(struct net_device *dev,
 	if (!txmac_stat)
 		return 0;
 
-	if (netif_msg_intr(cp))
-		printk(KERN_DEBUG "%s: txmac interrupt, txmac_stat: 0x%x\n",
-		       cp->dev->name, txmac_stat);
+	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);
 
 	/* Defer timer expiration is quite normal,
 	 * don't even log the event.
@@ -1152,14 +1142,12 @@ static int cas_txmac_interrupt(struct net_device *dev,
 
 	spin_lock(&cp->stat_lock[0]);
 	if (txmac_stat & MAC_TX_UNDERRUN) {
-		printk(KERN_ERR "%s: TX MAC xmit underrun.\n",
-		       dev->name);
+		netdev_err(dev, "TX MAC xmit underrun\n");
 		cp->net_stats[0].tx_fifo_errors++;
 	}
 
 	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
-		printk(KERN_ERR "%s: TX MAC max packet size error.\n",
-		       dev->name);
+		netdev_err(dev, "TX MAC max packet size error\n");
 		cp->net_stats[0].tx_errors++;
 	}
 
@@ -1487,8 +1475,7 @@ static int cas_rxmac_reset(struct cas *cp)
 		udelay(10);
 	}
 	if (limit == STOP_TRIES) {
-		printk(KERN_ERR "%s: RX MAC will not disable, resetting whole "
-		       "chip.\n", dev->name);
+		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
 		return 1;
 	}
 
@@ -1500,8 +1487,7 @@ static int cas_rxmac_reset(struct cas *cp)
 		udelay(10);
 	}
 	if (limit == STOP_TRIES) {
-		printk(KERN_ERR "%s: RX DMA will not disable, resetting whole "
-		       "chip.\n", dev->name);
+		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
 		return 1;
 	}
 
@@ -1515,8 +1501,7 @@ static int cas_rxmac_reset(struct cas *cp)
 		udelay(10);
 	}
 	if (limit == STOP_TRIES) {
-		printk(KERN_ERR "%s: RX reset command will not execute, "
-		       "resetting whole chip.\n", dev->name);
+		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
 		return 1;
 	}
 
@@ -1545,9 +1530,7 @@ static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
 	if (!stat)
 		return 0;
 
-	if (netif_msg_intr(cp))
-		printk(KERN_DEBUG "%s: rxmac interrupt, stat: 0x%x\n",
-		       cp->dev->name, stat);
+	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);
 
 	/* these are all rollovers */
 	spin_lock(&cp->stat_lock[0]);
@@ -1580,9 +1563,8 @@ static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
 	if (!stat)
 		return 0;
 
-	if (netif_msg_intr(cp))
-		printk(KERN_DEBUG "%s: mac interrupt, stat: 0x%x\n",
-		       cp->dev->name, stat);
+	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+		     "mac interrupt, stat: 0x%x\n", stat);
 
 	/* This interrupt is just for pause frame and pause
 	 * tracking. It is useful for diagnostics and debug
@@ -1605,9 +1587,7 @@ static inline int cas_mdio_link_not_up(struct cas *cp)
 
 	switch (cp->lstate) {
 	case link_force_ret:
-		if (netif_msg_link(cp))
-			printk(KERN_INFO "%s: Autoneg failed again, keeping"
-			       " forced mode\n", cp->dev->name);
+		netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n");
 		cas_phy_write(cp, MII_BMCR, cp->link_fcntl);
 		cp->timer_ticks = 5;
 		cp->lstate = link_force_ok;
@@ -1675,9 +1655,9 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
 		cas_mif_poll(cp, 0);
 		cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
 		cp->timer_ticks = 5;
-		if (cp->opened && netif_msg_link(cp))
-			printk(KERN_INFO "%s: Got link after fallback, retrying"
-			       " autoneg once...\n", cp->dev->name);
+		if (cp->opened)
+			netif_info(cp, link, cp->dev,
+				   "Got link after fallback, retrying autoneg once...\n");
 		cas_phy_write(cp, MII_BMCR,
 			      cp->link_fcntl | BMCR_ANENABLE |
 			      BMCR_ANRESTART);
@@ -1704,9 +1684,8 @@ static int cas_mii_link_check(struct cas *cp, const u16 bmsr)
 		cp->link_transition = LINK_TRANSITION_LINK_DOWN;
 
 		netif_carrier_off(cp->dev);
-		if (cp->opened && netif_msg_link(cp))
-			printk(KERN_INFO "%s: Link down\n",
-			       cp->dev->name);
+		if (cp->opened)
+			netif_info(cp, link, cp->dev, "Link down\n");
 		restart = 1;
 
 	} else if (++cp->timer_ticks > 10)
@@ -1737,23 +1716,23 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
 	if (!stat)
 		return 0;
 
-	printk(KERN_ERR "%s: PCI error [%04x:%04x] ", dev->name, stat,
-	       readl(cp->regs + REG_BIM_DIAG));
+	netdev_err(dev, "PCI error [%04x:%04x]",
+		   stat, readl(cp->regs + REG_BIM_DIAG));
 
 	/* cassini+ has this reserved */
 	if ((stat & PCI_ERR_BADACK) &&
 	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
-		printk("<No ACK64# during ABS64 cycle> ");
+		pr_cont(" <No ACK64# during ABS64 cycle>");
 
 	if (stat & PCI_ERR_DTRTO)
-		printk("<Delayed transaction timeout> ");
+		pr_cont(" <Delayed transaction timeout>");
 	if (stat & PCI_ERR_OTHER)
-		printk("<other> ");
+		pr_cont(" <other>");
 	if (stat & PCI_ERR_BIM_DMA_WRITE)
-		printk("<BIM DMA 0 write req> ");
+		pr_cont(" <BIM DMA 0 write req>");
 	if (stat & PCI_ERR_BIM_DMA_READ)
-		printk("<BIM DMA 0 read req> ");
-	printk("\n");
+		pr_cont(" <BIM DMA 0 read req>");
+	pr_cont("\n");
 
 	if (stat & PCI_ERR_OTHER) {
 		u16 cfg;
@@ -1762,25 +1741,19 @@ static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
 		 * true cause.
 		 */
 		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
-		printk(KERN_ERR "%s: Read PCI cfg space status [%04x]\n",
-		       dev->name, cfg);
+		netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
 		if (cfg & PCI_STATUS_PARITY)
-			printk(KERN_ERR "%s: PCI parity error detected.\n",
-			       dev->name);
+			netdev_err(dev, "PCI parity error detected\n");
 		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
-			printk(KERN_ERR "%s: PCI target abort.\n",
-			       dev->name);
+			netdev_err(dev, "PCI target abort\n");
 		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
-			printk(KERN_ERR "%s: PCI master acks target abort.\n",
-			       dev->name);
+			netdev_err(dev, "PCI master acks target abort\n");
 		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
-			printk(KERN_ERR "%s: PCI master abort.\n", dev->name);
+			netdev_err(dev, "PCI master abort\n");
 		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
-			printk(KERN_ERR "%s: PCI system error SERR#.\n",
-			       dev->name);
+			netdev_err(dev, "PCI system error SERR#\n");
 		if (cfg & PCI_STATUS_DETECTED_PARITY)
-			printk(KERN_ERR "%s: PCI parity error.\n",
-			       dev->name);
+			netdev_err(dev, "PCI parity error\n");
 
 		/* Write the error bits back to clear them. */
 		cfg &= (PCI_STATUS_PARITY |
@@ -1806,9 +1779,8 @@ static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
 {
 	if (status & INTR_RX_TAG_ERROR) {
 		/* corrupt RX tag framing */
-		if (netif_msg_rx_err(cp))
-			printk(KERN_DEBUG "%s: corrupt rx tag framing\n",
-			       cp->dev->name);
+		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+			     "corrupt rx tag framing\n");
 		spin_lock(&cp->stat_lock[0]);
 		cp->net_stats[0].rx_errors++;
 		spin_unlock(&cp->stat_lock[0]);
@@ -1817,9 +1789,8 @@ static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
 
 	if (status & INTR_RX_LEN_MISMATCH) {
 		/* length mismatch. */
-		if (netif_msg_rx_err(cp))
-			printk(KERN_DEBUG "%s: length mismatch for rx frame\n",
-			       cp->dev->name);
+		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+			     "length mismatch for rx frame\n");
 		spin_lock(&cp->stat_lock[0]);
 		cp->net_stats[0].rx_errors++;
 		spin_unlock(&cp->stat_lock[0]);
@@ -1861,12 +1832,11 @@ do_reset:
 #if 1
 	atomic_inc(&cp->reset_task_pending);
 	atomic_inc(&cp->reset_task_pending_all);
-	printk(KERN_ERR "%s:reset called in cas_abnormal_irq [0x%x]\n",
-	       dev->name, status);
+	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
 	schedule_work(&cp->reset_task);
 #else
 	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
-	printk(KERN_ERR "reset called in cas_abnormal_irq\n");
+	netdev_err(dev, "reset called in cas_abnormal_irq\n");
 	schedule_work(&cp->reset_task);
 #endif
 	return 1;
@@ -1920,9 +1890,8 @@ static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
 		if (count < 0)
 			break;
 
-		if (netif_msg_tx_done(cp))
-			printk(KERN_DEBUG "%s: tx[%d] done, slot %d\n",
-			       cp->dev->name, ring, entry);
+		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
+			     "tx[%d] done, slot %d\n", ring, entry);
 
 		skbs[entry] = NULL;
 		cp->tx_tiny_use[ring][entry].nbufs = 0;
@@ -1969,9 +1938,9 @@ static void cas_tx(struct net_device *dev, struct cas *cp,
 #ifdef USE_TX_COMPWB
 	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
 #endif
-	if (netif_msg_intr(cp))
-		printk(KERN_DEBUG "%s: tx interrupt, status: 0x%x, %llx\n",
-		       cp->dev->name, status, (unsigned long long)compwb);
+	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+		     "tx interrupt, status: 0x%x, %llx\n",
+		     status, (unsigned long long)compwb);
 	/* process all the rings */
 	for (ring = 0; ring < N_TX_RINGS; ring++) {
 #ifdef USE_TX_COMPWB
@@ -2050,10 +2019,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 
 		hlen = min(cp->page_size - off, dlen);
 		if (hlen < 0) {
-			if (netif_msg_rx_err(cp)) {
-				printk(KERN_DEBUG "%s: rx page overflow: "
-				       "%d\n", cp->dev->name, hlen);
-			}
+			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+				     "rx page overflow: %d\n", hlen);
 			dev_kfree_skb_irq(skb);
 			return -1;
 		}
@@ -2130,10 +2097,8 @@ static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
 		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
 		hlen = min(cp->page_size - off, dlen);
 		if (hlen < 0) {
-			if (netif_msg_rx_err(cp)) {
-				printk(KERN_DEBUG "%s: rx page overflow: "
-				       "%d\n", cp->dev->name, hlen);
-			}
+			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
+				     "rx page overflow: %d\n", hlen);
 			dev_kfree_skb_irq(skb);
 			return -1;
 		}
@@ -2265,9 +2230,8 @@ static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
 
 	entry = cp->rx_old[ring];
 
-	if (netif_msg_intr(cp))
-		printk(KERN_DEBUG "%s: rxd[%d] interrupt, done: %d\n",
-		       cp->dev->name, ring, entry);
+	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+		     "rxd[%d] interrupt, done: %d\n", ring, entry);
 
 	cluster = -1;
 	count = entry & 0x3;
@@ -2337,11 +2301,10 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
 	int entry, drops;
 	int npackets = 0;
 
-	if (netif_msg_intr(cp))
-		printk(KERN_DEBUG "%s: rx[%d] interrupt, done: %d/%d\n",
-		       cp->dev->name, ring,
-		       readl(cp->regs + REG_RX_COMP_HEAD),
-		       cp->rx_new[ring]);
+	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
+		     "rx[%d] interrupt, done: %d/%d\n",
+		     ring,
+		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);
 
 	entry = cp->rx_new[ring];
 	drops = 0;
@@ -2442,8 +2405,7 @@ static int cas_rx_ringN(struct cas *cp, int ring, int budget)
 	cp->rx_new[ring] = entry;
 
 	if (drops)
-		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n",
-		       cp->dev->name);
+		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
 	return npackets;
 }
 
@@ -2457,10 +2419,9 @@ static void cas_post_rxcs_ringN(struct net_device *dev,
 
 	last = cp->rx_cur[ring];
 	entry = cp->rx_new[ring];
-	if (netif_msg_intr(cp))
-		printk(KERN_DEBUG "%s: rxc[%d] interrupt, done: %d/%d\n",
-		       dev->name, ring, readl(cp->regs + REG_RX_COMP_HEAD),
-		       entry);
+	netif_printk(cp, intr, KERN_DEBUG, dev,
+		     "rxc[%d] interrupt, done: %d/%d\n",
+		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);
 
 	/* zero and re-mark descriptors */
 	while (last != entry) {
@@ -2729,42 +2690,38 @@ static void cas_tx_timeout(struct net_device *dev)
 {
 	struct cas *cp = netdev_priv(dev);
 
-	printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
+	netdev_err(dev, "transmit timed out, resetting\n");
 	if (!cp->hw_running) {
-		printk("%s: hrm.. hw not running!\n", dev->name);
+		netdev_err(dev, "hrm.. hw not running!\n");
 		return;
 	}
 
-	printk(KERN_ERR "%s: MIF_STATE[%08x]\n",
-	       dev->name, readl(cp->regs + REG_MIF_STATE_MACHINE));
+	netdev_err(dev, "MIF_STATE[%08x]\n",
+		   readl(cp->regs + REG_MIF_STATE_MACHINE));
 
-	printk(KERN_ERR "%s: MAC_STATE[%08x]\n",
-	       dev->name, readl(cp->regs + REG_MAC_STATE_MACHINE));
+	netdev_err(dev, "MAC_STATE[%08x]\n",
+		   readl(cp->regs + REG_MAC_STATE_MACHINE));
 
-	printk(KERN_ERR "%s: TX_STATE[%08x:%08x:%08x] "
-	       "FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
-	       dev->name,
-	       readl(cp->regs + REG_TX_CFG),
-	       readl(cp->regs + REG_MAC_TX_STATUS),
-	       readl(cp->regs + REG_MAC_TX_CFG),
-	       readl(cp->regs + REG_TX_FIFO_PKT_CNT),
-	       readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
-	       readl(cp->regs + REG_TX_FIFO_READ_PTR),
-	       readl(cp->regs + REG_TX_SM_1),
-	       readl(cp->regs + REG_TX_SM_2));
-
-	printk(KERN_ERR "%s: RX_STATE[%08x:%08x:%08x]\n",
-	       dev->name,
-	       readl(cp->regs + REG_RX_CFG),
-	       readl(cp->regs + REG_MAC_RX_STATUS),
-	       readl(cp->regs + REG_MAC_RX_CFG));
-
-	printk(KERN_ERR "%s: HP_STATE[%08x:%08x:%08x:%08x]\n",
-	       dev->name,
-	       readl(cp->regs + REG_HP_STATE_MACHINE),
-	       readl(cp->regs + REG_HP_STATUS0),
-	       readl(cp->regs + REG_HP_STATUS1),
-	       readl(cp->regs + REG_HP_STATUS2));
+	netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n",
+		   readl(cp->regs + REG_TX_CFG),
+		   readl(cp->regs + REG_MAC_TX_STATUS),
+		   readl(cp->regs + REG_MAC_TX_CFG),
+		   readl(cp->regs + REG_TX_FIFO_PKT_CNT),
+		   readl(cp->regs + REG_TX_FIFO_WRITE_PTR),
+		   readl(cp->regs + REG_TX_FIFO_READ_PTR),
+		   readl(cp->regs + REG_TX_SM_1),
+		   readl(cp->regs + REG_TX_SM_2));
+
+	netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n",
+		   readl(cp->regs + REG_RX_CFG),
+		   readl(cp->regs + REG_MAC_RX_STATUS),
+		   readl(cp->regs + REG_MAC_RX_CFG));
+
+	netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n",
+		   readl(cp->regs + REG_HP_STATE_MACHINE),
+		   readl(cp->regs + REG_HP_STATUS0),
+		   readl(cp->regs + REG_HP_STATUS1),
+		   readl(cp->regs + REG_HP_STATUS2));
 
 #if 1
 	atomic_inc(&cp->reset_task_pending);
@@ -2830,8 +2787,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
 		netif_stop_queue(dev);
 		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
-		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
-		       "queue awake!\n", dev->name);
+		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
 		return 1;
 	}
 
@@ -2908,11 +2864,9 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
 	if (TX_BUFFS_AVAIL(cp, ring) <= CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
 		netif_stop_queue(dev);
 
-	if (netif_msg_tx_queued(cp))
-		printk(KERN_DEBUG "%s: tx[%d] queued, slot %d, skblen %d, "
-		       "avail %d\n",
-		       dev->name, ring, entry, skb->len,
-		       TX_BUFFS_AVAIL(cp, ring));
+	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
+		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
+		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
 	writel(entry, cp->regs + REG_TX_KICKN(ring));
 	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
 	return 0;
@@ -3098,10 +3052,10 @@ static void cas_mac_reset(struct cas *cp)
 
 	if (readl(cp->regs + REG_MAC_TX_RESET) |
 	    readl(cp->regs + REG_MAC_RX_RESET))
-		printk(KERN_ERR "%s: mac tx[%d]/rx[%d] reset failed [%08x]\n",
-		       cp->dev->name, readl(cp->regs + REG_MAC_TX_RESET),
-		       readl(cp->regs + REG_MAC_RX_RESET),
-		       readl(cp->regs + REG_MAC_STATE_MACHINE));
+		netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n",
+			   readl(cp->regs + REG_MAC_TX_RESET),
+			   readl(cp->regs + REG_MAC_RX_RESET),
+			   readl(cp->regs + REG_MAC_STATE_MACHINE));
 }
 
 
@@ -3421,7 +3375,7 @@ use_random_mac_addr:
 		goto done;
 
 	/* Sun MAC prefix then 3 random bytes. */
-	printk(PFX "MAC address not found in ROM VPD\n");
+	pr_info("MAC address not found in ROM VPD\n");
 	dev_addr[0] = 0x08;
 	dev_addr[1] = 0x00;
 	dev_addr[2] = 0x20;
@@ -3482,7 +3436,7 @@ static int cas_check_invariants(struct cas *cp)
 			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
 			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
 		} else {
-			printk(PFX "MTU limited to %d bytes\n", CAS_MAX_MTU);
+			printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
 		}
 	}
 #endif
@@ -3527,7 +3481,7 @@ static int cas_check_invariants(struct cas *cp)
 			}
 		}
 	}
-	printk(KERN_ERR PFX "MII phy did not respond [%08x]\n",
-	       readl(cp->regs + REG_MIF_STATE_MACHINE));
+	pr_err("MII phy did not respond [%08x]\n",
+	       readl(cp->regs + REG_MIF_STATE_MACHINE));
 	return -1;
 
@@ -3572,21 +3526,19 @@ static inline void cas_start_dma(struct cas *cp)
 		val = readl(cp->regs + REG_MAC_RX_CFG);
 		if ((val & MAC_RX_CFG_EN)) {
 			if (txfailed) {
-				printk(KERN_ERR
-				       "%s: enabling mac failed [tx:%08x:%08x].\n",
-				       cp->dev->name,
-				       readl(cp->regs + REG_MIF_STATE_MACHINE),
-				       readl(cp->regs + REG_MAC_STATE_MACHINE));
+				netdev_err(cp->dev,
+					   "enabling mac failed [tx:%08x:%08x]\n",
+					   readl(cp->regs + REG_MIF_STATE_MACHINE),
+					   readl(cp->regs + REG_MAC_STATE_MACHINE));
 			}
 			goto enable_rx_done;
 		}
 		udelay(10);
 	}
-	printk(KERN_ERR "%s: enabling mac failed [%s:%08x:%08x].\n",
-	       cp->dev->name,
-	       (txfailed? "tx,rx":"rx"),
-	       readl(cp->regs + REG_MIF_STATE_MACHINE),
-	       readl(cp->regs + REG_MAC_STATE_MACHINE));
+	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
+		   (txfailed ? "tx,rx" : "rx"),
+		   readl(cp->regs + REG_MIF_STATE_MACHINE),
+		   readl(cp->regs + REG_MAC_STATE_MACHINE));
 
 enable_rx_done:
 	cas_unmask_intr(cp); /* enable interrupts */
@@ -3688,9 +3640,8 @@ static void cas_set_link_modes(struct cas *cp)
 		}
 	}
 
-	if (netif_msg_link(cp))
-		printk(KERN_INFO "%s: Link up at %d Mbps, %s-duplex.\n",
-		       cp->dev->name, speed, (full_duplex ? "full" : "half"));
+	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
+		   speed, full_duplex ? "full" : "half");
 
 	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
 	if (CAS_PHY_MII(cp->phy_type)) {
@@ -3760,18 +3711,14 @@ static void cas_set_link_modes(struct cas *cp)
 
 	if (netif_msg_link(cp)) {
 		if (pause & 0x01) {
-			printk(KERN_INFO "%s: Pause is enabled "
-			       "(rxfifo: %d off: %d on: %d)\n",
-			       cp->dev->name,
-			       cp->rx_fifo_size,
-			       cp->rx_pause_off,
-			       cp->rx_pause_on);
+			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
+				    cp->rx_fifo_size,
+				    cp->rx_pause_off,
+				    cp->rx_pause_on);
 		} else if (pause & 0x10) {
-			printk(KERN_INFO "%s: TX pause enabled\n",
-			       cp->dev->name);
+			netdev_info(cp->dev, "TX pause enabled\n");
 		} else {
-			printk(KERN_INFO "%s: Pause is disabled\n",
-			       cp->dev->name);
+			netdev_info(cp->dev, "Pause is disabled\n");
 		}
 	}
 
@@ -3847,7 +3794,7 @@ static void cas_global_reset(struct cas *cp, int blkflag)
 			goto done;
 		udelay(10);
 	}
-	printk(KERN_ERR "%s: sw reset failed.\n", cp->dev->name);
+	netdev_err(cp->dev, "sw reset failed\n");
 
 done:
 	/* enable various BIM interrupts */
@@ -3953,7 +3900,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
 #else
 	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
 		   CAS_RESET_ALL : CAS_RESET_MTU);
-	printk(KERN_ERR "reset called in cas_change_mtu\n");
+	pr_err("reset called in cas_change_mtu\n");
 	schedule_work(&cp->reset_task);
 #endif
 
@@ -4235,10 +4182,8 @@ static void cas_link_timer(unsigned long data)
 
 		if (((tlm == 0x5) || (tlm == 0x3)) &&
 		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
-			if (netif_msg_tx_err(cp))
-				printk(KERN_DEBUG "%s: tx err: "
-				       "MAC_STATE[%08x]\n",
-				       cp->dev->name, val);
+			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+				     "tx err: MAC_STATE[%08x]\n", val);
 			reset = 1;
 			goto done;
 		}
@@ -4247,10 +4192,9 @@ static void cas_link_timer(unsigned long data)
 		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
 		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
 		if ((val == 0) && (wptr != rptr)) {
-			if (netif_msg_tx_err(cp))
-				printk(KERN_DEBUG "%s: tx err: "
-				       "TX_FIFO[%08x:%08x:%08x]\n",
-				       cp->dev->name, val, wptr, rptr);
+			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
+				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
+				     val, wptr, rptr);
 			reset = 1;
 		}
 
@@ -4266,7 +4210,7 @@ done:
 		schedule_work(&cp->reset_task);
 #else
 		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
-		printk(KERN_ERR "reset called in cas_link_timer\n");
+		pr_err("reset called in cas_link_timer\n");
 		schedule_work(&cp->reset_task);
 #endif
 	}
@@ -4359,8 +4303,7 @@ static int cas_open(struct net_device *dev)
 	 */
 	if (request_irq(cp->pdev->irq, cas_interrupt,
 			IRQF_SHARED, dev->name, (void *) dev)) {
-		printk(KERN_ERR "%s: failed to request irq !\n",
-		       cp->dev->name);
+		netdev_err(cp->dev, "failed to request irq !\n");
 		err = -EAGAIN;
 		goto err_spare;
 	}
@@ -5000,24 +4943,24 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 	u8 orig_cacheline_size = 0, cas_cacheline_size = 0;
 
 	if (cas_version_printed++ == 0)
-		printk(KERN_INFO "%s", version);
+		pr_info("%s", version);
 
 	err = pci_enable_device(pdev);
 	if (err) {
-		dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
+		dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
 		return err;
 	}
 
 	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
 		dev_err(&pdev->dev, "Cannot find proper PCI device "
-			"base address, aborting.\n");
+			"base address, aborting\n");
 		err = -ENODEV;
 		goto err_out_disable_pdev;
 	}
 
 	dev = alloc_etherdev(sizeof(*cp));
 	if (!dev) {
-		dev_err(&pdev->dev, "Etherdev alloc failed, aborting.\n");
+		dev_err(&pdev->dev, "Etherdev alloc failed, aborting\n");
 		err = -ENOMEM;
 		goto err_out_disable_pdev;
 	}
@@ -5025,7 +4968,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 
 	err = pci_request_regions(pdev, dev->name);
 	if (err) {
-		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
+		dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
 		goto err_out_free_netdev;
 	}
 	pci_set_master(pdev);
@@ -5039,8 +4982,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 	pci_cmd |= PCI_COMMAND_PARITY;
 	pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
 	if (pci_try_set_mwi(pdev))
-		printk(KERN_WARNING PFX "Could not enable MWI for %s\n",
-		       pci_name(pdev));
+		pr_warning("Could not enable MWI for %s\n", pci_name(pdev));
 
 	cas_program_bridge(pdev);
 
@@ -5083,7 +5025,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 		if (err) {
 			dev_err(&pdev->dev, "No usable DMA configuration, "
-				"aborting.\n");
+				"aborting\n");
 			goto err_out_free_res;
 		}
 		pci_using_dac = 0;
@@ -5142,7 +5084,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 	/* give us access to cassini registers */
 	cp->regs = pci_iomap(pdev, 0, casreg_len);
 	if (!cp->regs) {
-		dev_err(&pdev->dev, "Cannot map device registers, aborting.\n");
+		dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
 		goto err_out_free_res;
 	}
 	cp->casreg_len = casreg_len;
@@ -5161,7 +5103,7 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 		pci_alloc_consistent(pdev, sizeof(struct cas_init_block),
 				     &cp->block_dvma);
 	if (!cp->init_block) {
-		dev_err(&pdev->dev, "Cannot allocate init block, aborting.\n");
+		dev_err(&pdev->dev, "Cannot allocate init block, aborting\n");
 		goto err_out_iounmap;
 	}
 
@@ -5195,18 +5137,17 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
 		dev->features |= NETIF_F_HIGHDMA;
 
 	if (register_netdev(dev)) {
-		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+		dev_err(&pdev->dev, "Cannot register net device, aborting\n");
 		goto err_out_free_consistent;
 	}
 
 	i = readl(cp->regs + REG_BIM_CFG);
-	printk(KERN_INFO "%s: Sun Cassini%s (%sbit/%sMHz PCI/%s) "
-	       "Ethernet[%d] %pM\n", dev->name,
-	       (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
-	       (i & BIM_CFG_32BIT) ? "32" : "64",
-	       (i & BIM_CFG_66MHZ) ? "66" : "33",
-	       (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
-	       dev->dev_addr);
+	netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n",
+		    (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "",
+		    (i & BIM_CFG_32BIT) ? "32" : "64",
+		    (i & BIM_CFG_66MHZ) ? "66" : "33",
+		    (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq,
+		    dev->dev_addr);
 
 	pci_set_drvdata(pdev, dev);
 	cp->hw_running = 1;
@@ -5320,7 +5261,7 @@ static int cas_resume(struct pci_dev *pdev)
 	struct net_device *dev = pci_get_drvdata(pdev);
 	struct cas *cp = netdev_priv(dev);
 
-	printk(KERN_INFO "%s: resuming\n", dev->name);
+	netdev_info(dev, "resuming\n");
 
 	mutex_lock(&cp->pm_mutex);
 	cas_hard_reset(cp);