about summary refs log tree commit diff stats
path: root/drivers/net/b44.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/net/b44.c')
-rw-r--r--  drivers/net/b44.c  178
1 files changed, 109 insertions(+), 69 deletions(-)
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index 0ee3e27969c6..c53848f787eb 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -18,7 +18,6 @@
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/init.h> 20#include <linux/init.h>
21#include <linux/version.h>
22#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
23 22
24#include <asm/uaccess.h> 23#include <asm/uaccess.h>
@@ -29,8 +28,8 @@
29 28
30#define DRV_MODULE_NAME "b44" 29#define DRV_MODULE_NAME "b44"
31#define PFX DRV_MODULE_NAME ": " 30#define PFX DRV_MODULE_NAME ": "
32#define DRV_MODULE_VERSION "0.95" 31#define DRV_MODULE_VERSION "0.96"
33#define DRV_MODULE_RELDATE "Aug 3, 2004" 32#define DRV_MODULE_RELDATE "Nov 8, 2005"
34 33
35#define B44_DEF_MSG_ENABLE \ 34#define B44_DEF_MSG_ENABLE \
36 (NETIF_MSG_DRV | \ 35 (NETIF_MSG_DRV | \
@@ -102,14 +101,16 @@ MODULE_DEVICE_TABLE(pci, b44_pci_tbl);
102static void b44_halt(struct b44 *); 101static void b44_halt(struct b44 *);
103static void b44_init_rings(struct b44 *); 102static void b44_init_rings(struct b44 *);
104static void b44_init_hw(struct b44 *); 103static void b44_init_hw(struct b44 *);
105static int b44_poll(struct net_device *dev, int *budget);
106#ifdef CONFIG_NET_POLL_CONTROLLER
107static void b44_poll_controller(struct net_device *dev);
108#endif
109 104
110static int dma_desc_align_mask; 105static int dma_desc_align_mask;
111static int dma_desc_sync_size; 106static int dma_desc_sync_size;
112 107
108static const char b44_gstrings[][ETH_GSTRING_LEN] = {
109#define _B44(x...) # x,
110B44_STAT_REG_DECLARE
111#undef _B44
112};
113
113static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev, 114static inline void b44_sync_dma_desc_for_device(struct pci_dev *pdev,
114 dma_addr_t dma_base, 115 dma_addr_t dma_base,
115 unsigned long offset, 116 unsigned long offset,
@@ -502,7 +503,10 @@ static void b44_stats_update(struct b44 *bp)
502 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) { 503 for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL) {
503 *val++ += br32(bp, reg); 504 *val++ += br32(bp, reg);
504 } 505 }
505 val = &bp->hw_stats.rx_good_octets; 506
507 /* Pad */
508 reg += 8*4UL;
509
506 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) { 510 for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL) {
507 *val++ += br32(bp, reg); 511 *val++ += br32(bp, reg);
508 } 512 }
@@ -653,7 +657,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
653 657
654 /* Hardware bug work-around, the chip is unable to do PCI DMA 658 /* Hardware bug work-around, the chip is unable to do PCI DMA
655 to/from anything above 1GB :-( */ 659 to/from anything above 1GB :-( */
656 if(mapping+RX_PKT_BUF_SZ > B44_DMA_MASK) { 660 if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
657 /* Sigh... */ 661 /* Sigh... */
658 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); 662 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
659 dev_kfree_skb_any(skb); 663 dev_kfree_skb_any(skb);
@@ -663,7 +667,7 @@ static int b44_alloc_rx_skb(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
663 mapping = pci_map_single(bp->pdev, skb->data, 667 mapping = pci_map_single(bp->pdev, skb->data,
664 RX_PKT_BUF_SZ, 668 RX_PKT_BUF_SZ,
665 PCI_DMA_FROMDEVICE); 669 PCI_DMA_FROMDEVICE);
666 if(mapping+RX_PKT_BUF_SZ > B44_DMA_MASK) { 670 if (mapping + RX_PKT_BUF_SZ > B44_DMA_MASK) {
667 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE); 671 pci_unmap_single(bp->pdev, mapping, RX_PKT_BUF_SZ,PCI_DMA_FROMDEVICE);
668 dev_kfree_skb_any(skb); 672 dev_kfree_skb_any(skb);
669 return -ENOMEM; 673 return -ENOMEM;
@@ -890,11 +894,10 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
890{ 894{
891 struct net_device *dev = dev_id; 895 struct net_device *dev = dev_id;
892 struct b44 *bp = netdev_priv(dev); 896 struct b44 *bp = netdev_priv(dev);
893 unsigned long flags;
894 u32 istat, imask; 897 u32 istat, imask;
895 int handled = 0; 898 int handled = 0;
896 899
897 spin_lock_irqsave(&bp->lock, flags); 900 spin_lock(&bp->lock);
898 901
899 istat = br32(bp, B44_ISTAT); 902 istat = br32(bp, B44_ISTAT);
900 imask = br32(bp, B44_IMASK); 903 imask = br32(bp, B44_IMASK);
@@ -905,6 +908,12 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
905 istat &= imask; 908 istat &= imask;
906 if (istat) { 909 if (istat) {
907 handled = 1; 910 handled = 1;
911
912 if (unlikely(!netif_running(dev))) {
913 printk(KERN_INFO "%s: late interrupt.\n", dev->name);
914 goto irq_ack;
915 }
916
908 if (netif_rx_schedule_prep(dev)) { 917 if (netif_rx_schedule_prep(dev)) {
909 /* NOTE: These writes are posted by the readback of 918 /* NOTE: These writes are posted by the readback of
910 * the ISTAT register below. 919 * the ISTAT register below.
@@ -917,10 +926,11 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
917 dev->name); 926 dev->name);
918 } 927 }
919 928
929irq_ack:
920 bw32(bp, B44_ISTAT, istat); 930 bw32(bp, B44_ISTAT, istat);
921 br32(bp, B44_ISTAT); 931 br32(bp, B44_ISTAT);
922 } 932 }
923 spin_unlock_irqrestore(&bp->lock, flags); 933 spin_unlock(&bp->lock);
924 return IRQ_RETVAL(handled); 934 return IRQ_RETVAL(handled);
925} 935}
926 936
@@ -948,6 +958,7 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
948{ 958{
949 struct b44 *bp = netdev_priv(dev); 959 struct b44 *bp = netdev_priv(dev);
950 struct sk_buff *bounce_skb; 960 struct sk_buff *bounce_skb;
961 int rc = NETDEV_TX_OK;
951 dma_addr_t mapping; 962 dma_addr_t mapping;
952 u32 len, entry, ctrl; 963 u32 len, entry, ctrl;
953 964
@@ -957,29 +968,28 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
957 /* This is a hard error, log it. */ 968 /* This is a hard error, log it. */
958 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) { 969 if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
959 netif_stop_queue(dev); 970 netif_stop_queue(dev);
960 spin_unlock_irq(&bp->lock);
961 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n", 971 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
962 dev->name); 972 dev->name);
963 return 1; 973 goto err_out;
964 } 974 }
965 975
966 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE); 976 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
967 if(mapping+len > B44_DMA_MASK) { 977 if (mapping + len > B44_DMA_MASK) {
968 /* Chip can't handle DMA to/from >1GB, use bounce buffer */ 978 /* Chip can't handle DMA to/from >1GB, use bounce buffer */
969 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE); 979 pci_unmap_single(bp->pdev, mapping, len, PCI_DMA_TODEVICE);
970 980
971 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ, 981 bounce_skb = __dev_alloc_skb(TX_PKT_BUF_SZ,
972 GFP_ATOMIC|GFP_DMA); 982 GFP_ATOMIC|GFP_DMA);
973 if (!bounce_skb) 983 if (!bounce_skb)
974 return NETDEV_TX_BUSY; 984 goto err_out;
975 985
976 mapping = pci_map_single(bp->pdev, bounce_skb->data, 986 mapping = pci_map_single(bp->pdev, bounce_skb->data,
977 len, PCI_DMA_TODEVICE); 987 len, PCI_DMA_TODEVICE);
978 if(mapping+len > B44_DMA_MASK) { 988 if (mapping + len > B44_DMA_MASK) {
979 pci_unmap_single(bp->pdev, mapping, 989 pci_unmap_single(bp->pdev, mapping,
980 len, PCI_DMA_TODEVICE); 990 len, PCI_DMA_TODEVICE);
981 dev_kfree_skb_any(bounce_skb); 991 dev_kfree_skb_any(bounce_skb);
982 return NETDEV_TX_BUSY; 992 goto err_out;
983 } 993 }
984 994
985 memcpy(skb_put(bounce_skb, len), skb->data, skb->len); 995 memcpy(skb_put(bounce_skb, len), skb->data, skb->len);
@@ -1019,11 +1029,16 @@ static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
1019 if (TX_BUFFS_AVAIL(bp) < 1) 1029 if (TX_BUFFS_AVAIL(bp) < 1)
1020 netif_stop_queue(dev); 1030 netif_stop_queue(dev);
1021 1031
1032 dev->trans_start = jiffies;
1033
1034out_unlock:
1022 spin_unlock_irq(&bp->lock); 1035 spin_unlock_irq(&bp->lock);
1023 1036
1024 dev->trans_start = jiffies; 1037 return rc;
1025 1038
1026 return 0; 1039err_out:
1040 rc = NETDEV_TX_BUSY;
1041 goto out_unlock;
1027} 1042}
1028 1043
1029static int b44_change_mtu(struct net_device *dev, int new_mtu) 1044static int b44_change_mtu(struct net_device *dev, int new_mtu)
@@ -1097,8 +1112,7 @@ static void b44_free_rings(struct b44 *bp)
1097 * 1112 *
1098 * The chip has been shut down and the driver detached from 1113 * The chip has been shut down and the driver detached from
1099 * the networking, so no interrupts or new tx packets will 1114 * the networking, so no interrupts or new tx packets will
1100 * end up in the driver. bp->lock is not held and we are not 1115 * end up in the driver.
1101 * in an interrupt context and thus may sleep.
1102 */ 1116 */
1103static void b44_init_rings(struct b44 *bp) 1117static void b44_init_rings(struct b44 *bp)
1104{ 1118{
@@ -1170,16 +1184,14 @@ static int b44_alloc_consistent(struct b44 *bp)
1170 int size; 1184 int size;
1171 1185
1172 size = B44_RX_RING_SIZE * sizeof(struct ring_info); 1186 size = B44_RX_RING_SIZE * sizeof(struct ring_info);
1173 bp->rx_buffers = kmalloc(size, GFP_KERNEL); 1187 bp->rx_buffers = kzalloc(size, GFP_KERNEL);
1174 if (!bp->rx_buffers) 1188 if (!bp->rx_buffers)
1175 goto out_err; 1189 goto out_err;
1176 memset(bp->rx_buffers, 0, size);
1177 1190
1178 size = B44_TX_RING_SIZE * sizeof(struct ring_info); 1191 size = B44_TX_RING_SIZE * sizeof(struct ring_info);
1179 bp->tx_buffers = kmalloc(size, GFP_KERNEL); 1192 bp->tx_buffers = kzalloc(size, GFP_KERNEL);
1180 if (!bp->tx_buffers) 1193 if (!bp->tx_buffers)
1181 goto out_err; 1194 goto out_err;
1182 memset(bp->tx_buffers, 0, size);
1183 1195
1184 size = DMA_TABLE_BYTES; 1196 size = DMA_TABLE_BYTES;
1185 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma); 1197 bp->rx_ring = pci_alloc_consistent(bp->pdev, size, &bp->rx_ring_dma);
@@ -1190,10 +1202,10 @@ static int b44_alloc_consistent(struct b44 *bp)
1190 struct dma_desc *rx_ring; 1202 struct dma_desc *rx_ring;
1191 dma_addr_t rx_ring_dma; 1203 dma_addr_t rx_ring_dma;
1192 1204
1193 if (!(rx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL))) 1205 rx_ring = kzalloc(size, GFP_KERNEL);
1206 if (!rx_ring)
1194 goto out_err; 1207 goto out_err;
1195 1208
1196 memset(rx_ring, 0, size);
1197 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring, 1209 rx_ring_dma = dma_map_single(&bp->pdev->dev, rx_ring,
1198 DMA_TABLE_BYTES, 1210 DMA_TABLE_BYTES,
1199 DMA_BIDIRECTIONAL); 1211 DMA_BIDIRECTIONAL);
@@ -1216,10 +1228,10 @@ static int b44_alloc_consistent(struct b44 *bp)
1216 struct dma_desc *tx_ring; 1228 struct dma_desc *tx_ring;
1217 dma_addr_t tx_ring_dma; 1229 dma_addr_t tx_ring_dma;
1218 1230
1219 if (!(tx_ring = (struct dma_desc *)kmalloc(size, GFP_KERNEL))) 1231 tx_ring = kzalloc(size, GFP_KERNEL);
1232 if (!tx_ring)
1220 goto out_err; 1233 goto out_err;
1221 1234
1222 memset(tx_ring, 0, size);
1223 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring, 1235 tx_ring_dma = dma_map_single(&bp->pdev->dev, tx_ring,
1224 DMA_TABLE_BYTES, 1236 DMA_TABLE_BYTES,
1225 DMA_TO_DEVICE); 1237 DMA_TO_DEVICE);
@@ -1382,22 +1394,21 @@ static int b44_open(struct net_device *dev)
1382 1394
1383 err = b44_alloc_consistent(bp); 1395 err = b44_alloc_consistent(bp);
1384 if (err) 1396 if (err)
1385 return err; 1397 goto out;
1386
1387 err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
1388 if (err)
1389 goto err_out_free;
1390
1391 spin_lock_irq(&bp->lock);
1392 1398
1393 b44_init_rings(bp); 1399 b44_init_rings(bp);
1394 b44_init_hw(bp); 1400 b44_init_hw(bp);
1395 bp->flags |= B44_FLAG_INIT_COMPLETE;
1396 1401
1397 netif_carrier_off(dev); 1402 netif_carrier_off(dev);
1398 b44_check_phy(bp); 1403 b44_check_phy(bp);
1399 1404
1400 spin_unlock_irq(&bp->lock); 1405 err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
1406 if (unlikely(err < 0)) {
1407 b44_chip_reset(bp);
1408 b44_free_rings(bp);
1409 b44_free_consistent(bp);
1410 goto out;
1411 }
1401 1412
1402 init_timer(&bp->timer); 1413 init_timer(&bp->timer);
1403 bp->timer.expires = jiffies + HZ; 1414 bp->timer.expires = jiffies + HZ;
@@ -1406,11 +1417,7 @@ static int b44_open(struct net_device *dev)
1406 add_timer(&bp->timer); 1417 add_timer(&bp->timer);
1407 1418
1408 b44_enable_ints(bp); 1419 b44_enable_ints(bp);
1409 1420out:
1410 return 0;
1411
1412err_out_free:
1413 b44_free_consistent(bp);
1414 return err; 1421 return err;
1415} 1422}
1416 1423
@@ -1445,6 +1452,8 @@ static int b44_close(struct net_device *dev)
1445 1452
1446 netif_stop_queue(dev); 1453 netif_stop_queue(dev);
1447 1454
1455 netif_poll_disable(dev);
1456
1448 del_timer_sync(&bp->timer); 1457 del_timer_sync(&bp->timer);
1449 1458
1450 spin_lock_irq(&bp->lock); 1459 spin_lock_irq(&bp->lock);
@@ -1454,13 +1463,14 @@ static int b44_close(struct net_device *dev)
1454#endif 1463#endif
1455 b44_halt(bp); 1464 b44_halt(bp);
1456 b44_free_rings(bp); 1465 b44_free_rings(bp);
1457 bp->flags &= ~B44_FLAG_INIT_COMPLETE;
1458 netif_carrier_off(bp->dev); 1466 netif_carrier_off(bp->dev);
1459 1467
1460 spin_unlock_irq(&bp->lock); 1468 spin_unlock_irq(&bp->lock);
1461 1469
1462 free_irq(dev->irq, dev); 1470 free_irq(dev->irq, dev);
1463 1471
1472 netif_poll_enable(dev);
1473
1464 b44_free_consistent(bp); 1474 b44_free_consistent(bp);
1465 1475
1466 return 0; 1476 return 0;
@@ -1525,8 +1535,6 @@ static void __b44_set_rx_mode(struct net_device *dev)
1525{ 1535{
1526 struct b44 *bp = netdev_priv(dev); 1536 struct b44 *bp = netdev_priv(dev);
1527 u32 val; 1537 u32 val;
1528 int i=0;
1529 unsigned char zero[6] = {0,0,0,0,0,0};
1530 1538
1531 val = br32(bp, B44_RXCONFIG); 1539 val = br32(bp, B44_RXCONFIG);
1532 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI); 1540 val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
@@ -1534,14 +1542,17 @@ static void __b44_set_rx_mode(struct net_device *dev)
1534 val |= RXCONFIG_PROMISC; 1542 val |= RXCONFIG_PROMISC;
1535 bw32(bp, B44_RXCONFIG, val); 1543 bw32(bp, B44_RXCONFIG, val);
1536 } else { 1544 } else {
1545 unsigned char zero[6] = {0, 0, 0, 0, 0, 0};
1546 int i = 0;
1547
1537 __b44_set_mac_addr(bp); 1548 __b44_set_mac_addr(bp);
1538 1549
1539 if (dev->flags & IFF_ALLMULTI) 1550 if (dev->flags & IFF_ALLMULTI)
1540 val |= RXCONFIG_ALLMULTI; 1551 val |= RXCONFIG_ALLMULTI;
1541 else 1552 else
1542 i=__b44_load_mcast(bp, dev); 1553 i = __b44_load_mcast(bp, dev);
1543 1554
1544 for(;i<64;i++) { 1555 for (; i < 64; i++) {
1545 __b44_cam_write(bp, zero, i); 1556 __b44_cam_write(bp, zero, i);
1546 } 1557 }
1547 bw32(bp, B44_RXCONFIG, val); 1558 bw32(bp, B44_RXCONFIG, val);
@@ -1605,7 +1616,7 @@ static int b44_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1605{ 1616{
1606 struct b44 *bp = netdev_priv(dev); 1617 struct b44 *bp = netdev_priv(dev);
1607 1618
1608 if (!(bp->flags & B44_FLAG_INIT_COMPLETE)) 1619 if (!netif_running(dev))
1609 return -EAGAIN; 1620 return -EAGAIN;
1610 cmd->supported = (SUPPORTED_Autoneg); 1621 cmd->supported = (SUPPORTED_Autoneg);
1611 cmd->supported |= (SUPPORTED_100baseT_Half | 1622 cmd->supported |= (SUPPORTED_100baseT_Half |
@@ -1643,7 +1654,7 @@ static int b44_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1643{ 1654{
1644 struct b44 *bp = netdev_priv(dev); 1655 struct b44 *bp = netdev_priv(dev);
1645 1656
1646 if (!(bp->flags & B44_FLAG_INIT_COMPLETE)) 1657 if (!netif_running(dev))
1647 return -EAGAIN; 1658 return -EAGAIN;
1648 1659
1649 /* We do not support gigabit. */ 1660 /* We do not support gigabit. */
@@ -1773,6 +1784,37 @@ static int b44_set_pauseparam(struct net_device *dev,
1773 return 0; 1784 return 0;
1774} 1785}
1775 1786
1787static void b44_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1788{
1789 switch(stringset) {
1790 case ETH_SS_STATS:
1791 memcpy(data, *b44_gstrings, sizeof(b44_gstrings));
1792 break;
1793 }
1794}
1795
1796static int b44_get_stats_count(struct net_device *dev)
1797{
1798 return ARRAY_SIZE(b44_gstrings);
1799}
1800
1801static void b44_get_ethtool_stats(struct net_device *dev,
1802 struct ethtool_stats *stats, u64 *data)
1803{
1804 struct b44 *bp = netdev_priv(dev);
1805 u32 *val = &bp->hw_stats.tx_good_octets;
1806 u32 i;
1807
1808 spin_lock_irq(&bp->lock);
1809
1810 b44_stats_update(bp);
1811
1812 for (i = 0; i < ARRAY_SIZE(b44_gstrings); i++)
1813 *data++ = *val++;
1814
1815 spin_unlock_irq(&bp->lock);
1816}
1817
1776static struct ethtool_ops b44_ethtool_ops = { 1818static struct ethtool_ops b44_ethtool_ops = {
1777 .get_drvinfo = b44_get_drvinfo, 1819 .get_drvinfo = b44_get_drvinfo,
1778 .get_settings = b44_get_settings, 1820 .get_settings = b44_get_settings,
@@ -1785,6 +1827,9 @@ static struct ethtool_ops b44_ethtool_ops = {
1785 .set_pauseparam = b44_set_pauseparam, 1827 .set_pauseparam = b44_set_pauseparam,
1786 .get_msglevel = b44_get_msglevel, 1828 .get_msglevel = b44_get_msglevel,
1787 .set_msglevel = b44_set_msglevel, 1829 .set_msglevel = b44_set_msglevel,
1830 .get_strings = b44_get_strings,
1831 .get_stats_count = b44_get_stats_count,
1832 .get_ethtool_stats = b44_get_ethtool_stats,
1788 .get_perm_addr = ethtool_op_get_perm_addr, 1833 .get_perm_addr = ethtool_op_get_perm_addr,
1789}; 1834};
1790 1835
@@ -1893,9 +1938,9 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
1893 1938
1894 err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK); 1939 err = pci_set_consistent_dma_mask(pdev, (u64) B44_DMA_MASK);
1895 if (err) { 1940 if (err) {
1896 printk(KERN_ERR PFX "No usable DMA configuration, " 1941 printk(KERN_ERR PFX "No usable DMA configuration, "
1897 "aborting.\n"); 1942 "aborting.\n");
1898 goto err_out_free_res; 1943 goto err_out_free_res;
1899 } 1944 }
1900 1945
1901 b44reg_base = pci_resource_start(pdev, 0); 1946 b44reg_base = pci_resource_start(pdev, 0);
@@ -1917,10 +1962,8 @@ static int __devinit b44_init_one(struct pci_dev *pdev,
1917 bp = netdev_priv(dev); 1962 bp = netdev_priv(dev);
1918 bp->pdev = pdev; 1963 bp->pdev = pdev;
1919 bp->dev = dev; 1964 bp->dev = dev;
1920 if (b44_debug >= 0) 1965
1921 bp->msg_enable = (1 << b44_debug) - 1; 1966 bp->msg_enable = netif_msg_init(b44_debug, B44_DEF_MSG_ENABLE);
1922 else
1923 bp->msg_enable = B44_DEF_MSG_ENABLE;
1924 1967
1925 spin_lock_init(&bp->lock); 1968 spin_lock_init(&bp->lock);
1926 1969
@@ -2010,17 +2053,14 @@ err_out_disable_pdev:
2010static void __devexit b44_remove_one(struct pci_dev *pdev) 2053static void __devexit b44_remove_one(struct pci_dev *pdev)
2011{ 2054{
2012 struct net_device *dev = pci_get_drvdata(pdev); 2055 struct net_device *dev = pci_get_drvdata(pdev);
2056 struct b44 *bp = netdev_priv(dev);
2013 2057
2014 if (dev) { 2058 unregister_netdev(dev);
2015 struct b44 *bp = netdev_priv(dev); 2059 iounmap(bp->regs);
2016 2060 free_netdev(dev);
2017 unregister_netdev(dev); 2061 pci_release_regions(pdev);
2018 iounmap(bp->regs); 2062 pci_disable_device(pdev);
2019 free_netdev(dev); 2063 pci_set_drvdata(pdev, NULL);
2020 pci_release_regions(pdev);
2021 pci_disable_device(pdev);
2022 pci_set_drvdata(pdev, NULL);
2023 }
2024} 2064}
2025 2065
2026static int b44_suspend(struct pci_dev *pdev, pm_message_t state) 2066static int b44_suspend(struct pci_dev *pdev, pm_message_t state)