author	David S. Miller <davem@davemloft.net>	2009-09-02 03:32:56 -0400
committer	David S. Miller <davem@davemloft.net>	2009-09-02 03:32:56 -0400
commit	6cdee2f96a97f6da26bd3759c3f8823332fbb438 (patch)
tree	ec79086f05ffc3bdf1aecc37e108ccfc3a95450d /drivers/net
parent	0625491493d9000e4556bf566d205c28c8e7dc4e (diff)
parent	2fbd3da3877ad8d923b055e5996f80b4d4a6daf4 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/yellowfin.c
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/Kconfig	4
-rw-r--r--	drivers/net/arm/w90p910_ether.c	4
-rw-r--r--	drivers/net/b44.c	5
-rw-r--r--	drivers/net/bnx2.c	17
-rw-r--r--	drivers/net/bnx2.h	1
-rw-r--r--	drivers/net/cnic.c	143
-rw-r--r--	drivers/net/cnic.h	1
-rw-r--r--	drivers/net/cnic_if.h	1
-rw-r--r--	drivers/net/e100.c	2
-rw-r--r--	drivers/net/e1000e/netdev.c	22
-rw-r--r--	drivers/net/fec_mpc52xx.c	5
-rw-r--r--	drivers/net/gianfar.c	1
-rw-r--r--	drivers/net/ibm_newemac/core.c	2
-rw-r--r--	drivers/net/irda/au1k_ir.c	4
-rw-r--r--	drivers/net/irda/pxaficp_ir.c	4
-rw-r--r--	drivers/net/irda/sa1100_ir.c	4
-rw-r--r--	drivers/net/ixp2000/ixpdev.c	5
-rw-r--r--	drivers/net/macb.c	7
-rw-r--r--	drivers/net/mlx4/en_tx.c	5
-rw-r--r--	drivers/net/smc91x.c	40
-rw-r--r--	drivers/net/tulip/tulip_core.c	5
-rw-r--r--	drivers/net/ucc_geth.c	5
-rw-r--r--	drivers/net/usb/pegasus.h	2
-rw-r--r--	drivers/net/via-rhine.c	5
-rw-r--r--	drivers/net/via-velocity.c	2
-rw-r--r--	drivers/net/virtio_net.c	61
-rw-r--r--	drivers/net/wireless/ipw2x00/ipw2200.c	120
-rw-r--r--	drivers/net/wireless/orinoco/hw.c	2
-rw-r--r--	drivers/net/wireless/rtl818x/rtl8187_dev.c	14
-rw-r--r--	drivers/net/yellowfin.c	28
30 files changed, 331 insertions(+), 190 deletions(-)
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index fed9bdaecfa2..21333c18f344 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1727,12 +1727,14 @@ config KS8842
 	tristate "Micrel KSZ8842"
 	depends on HAS_IOMEM
 	help
-	  This platform driver is for Micrel KSZ8842 chip.
+	  This platform driver is for Micrel KSZ8842 / KS8842
+	  2-port ethernet switch chip (managed, VLAN, QoS).
 
 config KS8851
 	tristate "Micrel KS8851 SPI"
 	depends on SPI
 	select MII
+	select CRC32
 	help
 	  SPI driver for Micrel KS8851 SPI attached network chip.
 
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 890716f6c018..3a0948f02b46 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -1098,7 +1098,7 @@ static struct platform_driver w90p910_ether_driver = {
 	.probe		= w90p910_ether_probe,
 	.remove		= __devexit_p(w90p910_ether_remove),
 	.driver		= {
-		.name	= "w90p910-emc",
+		.name	= "nuc900-emc",
 		.owner	= THIS_MODULE,
 	},
 };
@@ -1119,5 +1119,5 @@ module_exit(w90p910_ether_exit);
 MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
 MODULE_DESCRIPTION("w90p910 MAC driver!");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:w90p910-emc");
+MODULE_ALIAS("platform:nuc900-emc");
 
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index bee510177a3f..951735c9ec0b 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -952,9 +952,10 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int rc = NETDEV_TX_OK;
 	dma_addr_t mapping;
 	u32 len, entry, ctrl;
+	unsigned long flags;
 
 	len = skb->len;
-	spin_lock_irq(&bp->lock);
+	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
 	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
@@ -1027,7 +1028,7 @@ static netdev_tx_t b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dev->trans_start = jiffies;
 
 out_unlock:
-	spin_unlock_irq(&bp->lock);
+	spin_unlock_irqrestore(&bp->lock, flags);
 
 	return rc;
 
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index fcaf3bc8277e..1357d548e698 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -401,9 +401,11 @@ static int bnx2_unregister_cnic(struct net_device *dev)
 	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
 	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
 
+	mutex_lock(&bp->cnic_lock);
 	cp->drv_state = 0;
 	bnapi->cnic_present = 0;
 	rcu_assign_pointer(bp->cnic_ops, NULL);
+	mutex_unlock(&bp->cnic_lock);
 	synchronize_rcu();
 	return 0;
 }
@@ -431,13 +433,13 @@ bnx2_cnic_stop(struct bnx2 *bp)
 	struct cnic_ops *c_ops;
 	struct cnic_ctl_info info;
 
-	rcu_read_lock();
-	c_ops = rcu_dereference(bp->cnic_ops);
+	mutex_lock(&bp->cnic_lock);
+	c_ops = bp->cnic_ops;
 	if (c_ops) {
 		info.cmd = CNIC_CTL_STOP_CMD;
 		c_ops->cnic_ctl(bp->cnic_data, &info);
 	}
-	rcu_read_unlock();
+	mutex_unlock(&bp->cnic_lock);
 }
 
 static void
@@ -446,8 +448,8 @@ bnx2_cnic_start(struct bnx2 *bp)
 	struct cnic_ops *c_ops;
 	struct cnic_ctl_info info;
 
-	rcu_read_lock();
-	c_ops = rcu_dereference(bp->cnic_ops);
+	mutex_lock(&bp->cnic_lock);
+	c_ops = bp->cnic_ops;
 	if (c_ops) {
 		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
 			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
@@ -457,7 +459,7 @@ bnx2_cnic_start(struct bnx2 *bp)
 		info.cmd = CNIC_CTL_START_CMD;
 		c_ops->cnic_ctl(bp->cnic_data, &info);
 	}
-	rcu_read_unlock();
+	mutex_unlock(&bp->cnic_lock);
 }
 
 #else
@@ -7687,6 +7689,9 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
 
 	spin_lock_init(&bp->phy_lock);
 	spin_lock_init(&bp->indirect_lock);
+#ifdef BCM_CNIC
+	mutex_init(&bp->cnic_lock);
+#endif
 	INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
 	dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 7544188b6541..6c7f795d12de 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6903,6 +6903,7 @@ struct bnx2 {
 	u32			idle_chk_status_idx;
 
 #ifdef BCM_CNIC
+	struct mutex		cnic_lock;
 	struct cnic_eth_dev	cnic_eth_dev;
 #endif
 
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index f8a09236dc0a..d45eacb76702 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -138,6 +138,16 @@ static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
 	return NULL;
 }
 
+static inline void ulp_get(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_inc(&ulp_ops->ref_count);
+}
+
+static inline void ulp_put(struct cnic_ulp_ops *ulp_ops)
+{
+	atomic_dec(&ulp_ops->ref_count);
+}
+
 static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
 {
 	struct cnic_local *cp = dev->cnic_priv;
@@ -358,6 +368,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 	}
 	read_unlock(&cnic_dev_lock);
 
+	atomic_set(&ulp_ops->ref_count, 0);
 	rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
 	mutex_unlock(&cnic_lock);
 
@@ -379,6 +390,8 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
 int cnic_unregister_driver(int ulp_type)
 {
 	struct cnic_dev *dev;
+	struct cnic_ulp_ops *ulp_ops;
+	int i = 0;
 
 	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
@@ -386,7 +399,8 @@ int cnic_unregister_driver(int ulp_type)
 		return -EINVAL;
 	}
 	mutex_lock(&cnic_lock);
-	if (!cnic_ulp_tbl[ulp_type]) {
+	ulp_ops = cnic_ulp_tbl[ulp_type];
+	if (!ulp_ops) {
 		printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
 			"been registered\n", ulp_type);
 		goto out_unlock;
@@ -411,6 +425,14 @@ int cnic_unregister_driver(int ulp_type)
 
 	mutex_unlock(&cnic_lock);
 	synchronize_rcu();
+	while ((atomic_read(&ulp_ops->ref_count) != 0) && (i < 20)) {
+		msleep(100);
+		i++;
+	}
+
+	if (atomic_read(&ulp_ops->ref_count) != 0)
+		printk(KERN_WARNING PFX "%s: Failed waiting for ref count to go"
+		       " to zero.\n", dev->netdev->name);
 	return 0;
 
 out_unlock:
@@ -466,6 +488,7 @@ EXPORT_SYMBOL(cnic_register_driver);
 static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 {
 	struct cnic_local *cp = dev->cnic_priv;
+	int i = 0;
 
 	if (ulp_type >= MAX_CNIC_ULP_TYPE) {
 		printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
@@ -486,6 +509,15 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
 
 	synchronize_rcu();
 
+	while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
+	       i < 20) {
+		msleep(100);
+		i++;
+	}
+	if (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]))
+		printk(KERN_WARNING PFX "%s: Failed waiting for ULP up call"
+		       " to complete.\n", dev->netdev->name);
+
 	return 0;
 }
 EXPORT_SYMBOL(cnic_unregister_driver);
@@ -1101,18 +1133,23 @@ static void cnic_ulp_stop(struct cnic_dev *dev)
 	if (cp->cnic_uinfo)
 		cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
 
-	rcu_read_lock();
 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-		if (!ulp_ops)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cp->ulp_ops[if_type];
+		if (!ulp_ops) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+		mutex_unlock(&cnic_lock);
 
 		if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
 			ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+
+		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
 	}
-	rcu_read_unlock();
 }
 
 static void cnic_ulp_start(struct cnic_dev *dev)
@@ -1120,18 +1157,23 @@ static void cnic_ulp_start(struct cnic_dev *dev)
 	struct cnic_local *cp = dev->cnic_priv;
 	int if_type;
 
-	rcu_read_lock();
 	for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
-		if (!ulp_ops || !ulp_ops->cnic_start)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cp->ulp_ops[if_type];
+		if (!ulp_ops || !ulp_ops->cnic_start) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		set_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
+		mutex_unlock(&cnic_lock);
 
 		if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
 			ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+
+		clear_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[if_type]);
 	}
-	rcu_read_unlock();
 }
 
 static int cnic_ctl(void *data, struct cnic_ctl_info *info)
@@ -1141,22 +1183,18 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
 	switch (info->cmd) {
 	case CNIC_CTL_STOP_CMD:
 		cnic_hold(dev);
-		mutex_lock(&cnic_lock);
 
 		cnic_ulp_stop(dev);
 		cnic_stop_hw(dev);
 
-		mutex_unlock(&cnic_lock);
 		cnic_put(dev);
 		break;
 	case CNIC_CTL_START_CMD:
 		cnic_hold(dev);
-		mutex_lock(&cnic_lock);
 
 		if (!cnic_start_hw(dev))
 			cnic_ulp_start(dev);
 
-		mutex_unlock(&cnic_lock);
 		cnic_put(dev);
 		break;
 	default:
@@ -1170,19 +1208,23 @@ static void cnic_ulp_init(struct cnic_dev *dev)
 	int i;
 	struct cnic_local *cp = dev->cnic_priv;
 
-	rcu_read_lock();
 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_init)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl[i];
+		if (!ulp_ops || !ulp_ops->cnic_init) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
 
 		if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 			ulp_ops->cnic_init(dev);
 
+		ulp_put(ulp_ops);
 	}
-	rcu_read_unlock();
 }
 
 static void cnic_ulp_exit(struct cnic_dev *dev)
@@ -1190,19 +1232,23 @@ static void cnic_ulp_exit(struct cnic_dev *dev)
 	int i;
 	struct cnic_local *cp = dev->cnic_priv;
 
-	rcu_read_lock();
 	for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
 		struct cnic_ulp_ops *ulp_ops;
 
-		ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
-		if (!ulp_ops || !ulp_ops->cnic_exit)
+		mutex_lock(&cnic_lock);
+		ulp_ops = cnic_ulp_tbl[i];
+		if (!ulp_ops || !ulp_ops->cnic_exit) {
+			mutex_unlock(&cnic_lock);
 			continue;
+		}
+		ulp_get(ulp_ops);
+		mutex_unlock(&cnic_lock);
 
 		if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
 			ulp_ops->cnic_exit(dev);
 
+		ulp_put(ulp_ops);
 	}
-	rcu_read_unlock();
 }
 
 static int cnic_cm_offload_pg(struct cnic_sock *csk)
@@ -2418,21 +2464,45 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
 	return 0;
 }
 
-static int cnic_start_hw(struct cnic_dev *dev)
+static int cnic_register_netdev(struct cnic_dev *dev)
 {
 	struct cnic_local *cp = dev->cnic_priv;
 	struct cnic_eth_dev *ethdev = cp->ethdev;
 	int err;
 
-	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
-		return -EALREADY;
+	if (!ethdev)
+		return -ENODEV;
+
+	if (ethdev->drv_state & CNIC_DRV_STATE_REGD)
+		return 0;
 
 	err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
-	if (err) {
+	if (err)
 		printk(KERN_ERR PFX "%s: register_cnic failed\n",
 		       dev->netdev->name);
-		goto err2;
-	}
+
+	return err;
+}
+
+static void cnic_unregister_netdev(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+
+	if (!ethdev)
+		return;
+
+	ethdev->drv_unregister_cnic(dev->netdev);
+}
+
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+	struct cnic_local *cp = dev->cnic_priv;
+	struct cnic_eth_dev *ethdev = cp->ethdev;
+	int err;
+
+	if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+		return -EALREADY;
 
 	dev->regview = ethdev->io_base;
 	cp->chip_id = ethdev->chip_id;
@@ -2463,18 +2533,13 @@ static int cnic_start_hw(struct cnic_dev *dev)
 	return 0;
 
 err1:
-	ethdev->drv_unregister_cnic(dev->netdev);
 	cp->free_resc(dev);
 	pci_dev_put(dev->pcidev);
-err2:
 	return err;
 }
 
 static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
 {
-	struct cnic_local *cp = dev->cnic_priv;
-	struct cnic_eth_dev *ethdev = cp->ethdev;
-
 	cnic_disable_bnx2_int_sync(dev);
 
 	cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
@@ -2486,8 +2551,6 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
 	cnic_setup_5709_context(dev, 0);
 	cnic_free_irq(dev);
 
-	ethdev->drv_unregister_cnic(dev->netdev);
-
 	cnic_free_resc(dev);
 }
 
@@ -2568,7 +2631,7 @@ static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
 	probe = symbol_get(bnx2_cnic_probe);
 	if (probe) {
 		ethdev = (*probe)(dev);
-		symbol_put_addr(probe);
+		symbol_put(bnx2_cnic_probe);
 	}
 	if (!ethdev)
 		return NULL;
@@ -2671,10 +2734,12 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 	else if (event == NETDEV_UNREGISTER)
 		cnic_ulp_exit(dev);
 	else if (event == NETDEV_UP) {
-		mutex_lock(&cnic_lock);
+		if (cnic_register_netdev(dev) != 0) {
+			cnic_put(dev);
+			goto done;
+		}
 		if (!cnic_start_hw(dev))
 			cnic_ulp_start(dev);
-		mutex_unlock(&cnic_lock);
 	}
 
 	rcu_read_lock();
@@ -2693,10 +2758,9 @@ static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
 	rcu_read_unlock();
 
 	if (event == NETDEV_GOING_DOWN) {
-		mutex_lock(&cnic_lock);
 		cnic_ulp_stop(dev);
 		cnic_stop_hw(dev);
-		mutex_unlock(&cnic_lock);
+		cnic_unregister_netdev(dev);
 	} else if (event == NETDEV_UNREGISTER) {
 		write_lock(&cnic_dev_lock);
 		list_del_init(&dev->list);
@@ -2728,6 +2792,7 @@ static void cnic_release(void)
 	}
 
 	cnic_ulp_exit(dev);
+	cnic_unregister_netdev(dev);
 	list_del_init(&dev->list);
 	cnic_free_dev(dev);
 }
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 5192d4a9df5a..a94b302bb464 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -176,6 +176,7 @@ struct cnic_local {
 	unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
 #define ULP_F_INIT	0
 #define ULP_F_START	1
+#define ULP_F_CALL_PENDING	2
 	struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
 
 	/* protected by ulp_lock */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index d1bce27ee99e..a49235739eef 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -290,6 +290,7 @@ struct cnic_ulp_ops {
 	void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
 				  char *data, u16 data_size);
 	struct module *owner;
+	atomic_t ref_count;
 };
 
 extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 6a3176042c47..679965c2bb86 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -1900,7 +1900,7 @@ static int e100_rx_indicate(struct nic *nic, struct rx *rx,
 		nic->ru_running = RU_SUSPENDED;
 		pci_dma_sync_single_for_device(nic->pdev, rx->dma_addr,
 					       sizeof(struct rfd),
-					       PCI_DMA_BIDIRECTIONAL);
+					       PCI_DMA_FROMDEVICE);
 		return -ENODATA;
 	}
 
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 428bf6d72f37..0f8d9619adea 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -4539,8 +4539,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	/* Allow time for pending master requests to run */
 	e1000e_disable_pcie_master(&adapter->hw);
 
-	if ((adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) &&
-	    !(hw->mac.ops.check_mng_mode(hw))) {
+	if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
 		/* enable wakeup by the PHY */
 		retval = e1000_init_phy_wakeup(adapter, wufc);
 		if (retval)
@@ -4558,7 +4557,8 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake)
 	*enable_wake = !!wufc;
 
 	/* make sure adapter isn't asleep if manageability is enabled */
-	if (adapter->flags & FLAG_MNG_PT_ENABLED)
+	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
+	    (hw->mac.ops.check_mng_mode(hw)))
 		*enable_wake = true;
 
 	if (adapter->hw.phy.type == e1000_phy_igp_3)
@@ -4671,14 +4671,6 @@ static int e1000_resume(struct pci_dev *pdev)
 		return err;
 	}
 
-	/* AER (Advanced Error Reporting) hooks */
-	err = pci_enable_pcie_error_reporting(pdev);
-	if (err) {
-		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
-			"0x%x\n", err);
-		/* non-fatal, continue */
-	}
-
 	pci_set_master(pdev);
 
 	pci_enable_wake(pdev, PCI_D3hot, 0);
@@ -4991,6 +4983,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_pci_reg;
 
+	/* AER (Advanced Error Reporting) hooks */
+	err = pci_enable_pcie_error_reporting(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "pci_enable_pcie_error_reporting failed "
+			"0x%x\n", err);
+		/* non-fatal, continue */
+	}
+
 	pci_set_master(pdev);
 	/* PCI config space info */
 	err = pci_save_state(pdev);
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index cc786333d95c..c40113f58963 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -309,6 +309,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
 	struct mpc52xx_fec_priv *priv = netdev_priv(dev);
 	struct bcom_fec_bd *bd;
+	unsigned long flags;
 
 	if (bcom_queue_full(priv->tx_dmatsk)) {
 		if (net_ratelimit())
@@ -316,7 +317,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_BUSY;
 	}
 
-	spin_lock_irq(&priv->lock);
+	spin_lock_irqsave(&priv->lock, flags);
 	dev->trans_start = jiffies;
 
 	bd = (struct bcom_fec_bd *)
@@ -332,7 +333,7 @@ static int mpc52xx_fec_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		netif_stop_queue(dev);
 	}
 
-	spin_unlock_irq(&priv->lock);
+	spin_unlock_irqrestore(&priv->lock, flags);
 
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 4544da4cf3ce..772104c5ed26 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -490,6 +490,7 @@ static int gfar_remove(struct of_device *ofdev)
 
 	dev_set_drvdata(&ofdev->dev, NULL);
 
+	unregister_netdev(dev);
 	iounmap(priv->regs);
 	free_netdev(priv->ndev);
 
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 5443558c439d..d7579e4feefc 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1305,6 +1305,8 @@ static int emac_close(struct net_device *ndev)
 
 	free_irq(dev->emac_irq, dev);
 
+	netif_carrier_off(ndev);
+
 	return 0;
 }
 
diff --git a/drivers/net/irda/au1k_ir.c b/drivers/net/irda/au1k_ir.c
index 22baf65e1563..eb424681202d 100644
--- a/drivers/net/irda/au1k_ir.c
+++ b/drivers/net/irda/au1k_ir.c
@@ -23,7 +23,6 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/interrupt.h>
@@ -205,9 +204,6 @@ static const struct net_device_ops au1k_irda_netdev_ops = {
 	.ndo_start_xmit		= au1k_irda_hard_xmit,
 	.ndo_tx_timeout		= au1k_tx_timeout,
 	.ndo_do_ioctl		= au1k_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int au1k_irda_net_init(struct net_device *dev)
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index e76a083f901a..1445e5865196 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -803,9 +803,6 @@ static const struct net_device_ops pxa_irda_netdev_ops = {
 	.ndo_stop		= pxa_irda_stop,
 	.ndo_start_xmit		= pxa_irda_hard_xmit,
 	.ndo_do_ioctl		= pxa_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int pxa_irda_probe(struct platform_device *pdev)
@@ -830,6 +827,7 @@ static int pxa_irda_probe(struct platform_device *pdev)
 	if (!dev)
 		goto err_mem_3;
 
+	SET_NETDEV_DEV(dev, &pdev->dev);
 	si = netdev_priv(dev);
 	si->dev = &pdev->dev;
 	si->pdata = pdev->dev.platform_data;
diff --git a/drivers/net/irda/sa1100_ir.c b/drivers/net/irda/sa1100_ir.c
index 70e6acc597b0..38bf7cf2256d 100644
--- a/drivers/net/irda/sa1100_ir.c
+++ b/drivers/net/irda/sa1100_ir.c
@@ -24,7 +24,6 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
-#include <linux/etherdevice.h>
 #include <linux/slab.h>
 #include <linux/rtnetlink.h>
 #include <linux/interrupt.h>
@@ -881,9 +880,6 @@ static const struct net_device_ops sa1100_irda_netdev_ops = {
 	.ndo_stop		= sa1100_irda_stop,
 	.ndo_start_xmit		= sa1100_irda_hard_xmit,
 	.ndo_do_ioctl		= sa1100_irda_ioctl,
-	.ndo_change_mtu		= eth_change_mtu,
-	.ndo_validate_addr	= eth_validate_addr,
-	.ndo_set_mac_address	= eth_mac_addr,
 };
 
 static int sa1100_irda_probe(struct platform_device *pdev)
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 588b44d944ce..127243461a51 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -41,6 +41,7 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct ixpdev_priv *ip = netdev_priv(dev);
 	struct ixpdev_tx_desc *desc;
 	int entry;
+	unsigned long flags;
 
 	if (unlikely(skb->len > PAGE_SIZE)) {
 		/* @@@ Count drops. */
@@ -63,11 +64,11 @@ static int ixpdev_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	dev->trans_start = jiffies;
 
-	local_irq_disable();
+	local_irq_save(flags);
 	ip->tx_queue_entries++;
 	if (ip->tx_queue_entries == TX_BUF_COUNT_PER_CHAN)
 		netif_stop_queue(dev);
-	local_irq_enable();
+	local_irq_restore(flags);
 
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/macb.c b/drivers/net/macb.c
index d22952c78f13..01aaca99d29f 100644
--- a/drivers/net/macb.c
+++ b/drivers/net/macb.c
@@ -620,6 +620,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	dma_addr_t mapping;
 	unsigned int len, entry;
 	u32 ctrl;
+	unsigned long flags;
 
 #ifdef DEBUG
 	int i;
@@ -635,12 +636,12 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 #endif
 
 	len = skb->len;
-	spin_lock_irq(&bp->lock);
+	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
 	if (TX_BUFFS_AVAIL(bp) < 1) {
 		netif_stop_queue(dev);
-		spin_unlock_irq(&bp->lock);
+		spin_unlock_irqrestore(&bp->lock, flags);
 		dev_err(&bp->pdev->dev,
 			"BUG! Tx Ring full when queue awake!\n");
 		dev_dbg(&bp->pdev->dev, "tx_head = %u, tx_tail = %u\n",
@@ -674,7 +675,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (TX_BUFFS_AVAIL(bp) < 1)
 		netif_stop_queue(dev);
 
-	spin_unlock_irq(&bp->lock);
+	spin_unlock_irqrestore(&bp->lock, flags);
 
 	dev->trans_start = jiffies;
 
diff --git a/drivers/net/mlx4/en_tx.c b/drivers/net/mlx4/en_tx.c
index d3d6e991065b..8c7279965b44 100644
--- a/drivers/net/mlx4/en_tx.c
+++ b/drivers/net/mlx4/en_tx.c
@@ -437,6 +437,7 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 {
 	struct mlx4_en_cq *cq = &priv->tx_cq[tx_ind];
 	struct mlx4_en_tx_ring *ring = &priv->tx_ring[tx_ind];
+	unsigned long flags;
 
 	/* If we don't have a pending timer, set one up to catch our recent
 	   post in case the interface becomes idle */
@@ -445,9 +446,9 @@ static inline void mlx4_en_xmit_poll(struct mlx4_en_priv *priv, int tx_ind)
 
 	/* Poll the CQ every mlx4_en_TX_MODER_POLL packets */
 	if ((++ring->poll_cnt & (MLX4_EN_TX_POLL_MODER - 1)) == 0)
-		if (spin_trylock_irq(&ring->comp_lock)) {
+		if (spin_trylock_irqsave(&ring->comp_lock, flags)) {
 			mlx4_en_process_tx_cq(priv->dev, cq);
-			spin_unlock_irq(&ring->comp_lock);
+			spin_unlock_irqrestore(&ring->comp_lock, flags);
 		}
 }
 
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index 0f2c52c2e044..61be6d7680f6 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -196,21 +196,23 @@ static void PRINT_PKT(u_char *buf, int length)
 /* this enables an interrupt in the interrupt mask register */
 #define SMC_ENABLE_INT(lp, x) do {				\
 	unsigned char mask;					\
-	spin_lock_irq(&lp->lock);				\
+	unsigned long smc_enable_flags;				\
+	spin_lock_irqsave(&lp->lock, smc_enable_flags);		\
 	mask = SMC_GET_INT_MASK(lp);				\
 	mask |= (x);						\
 	SMC_SET_INT_MASK(lp, mask);				\
-	spin_unlock_irq(&lp->lock);				\
+	spin_unlock_irqrestore(&lp->lock, smc_enable_flags);	\
 } while (0)
 
 /* this disables an interrupt from the interrupt mask register */
 #define SMC_DISABLE_INT(lp, x) do {				\
 	unsigned char mask;					\
-	spin_lock_irq(&lp->lock);				\
+	unsigned long smc_disable_flags;			\
+	spin_lock_irqsave(&lp->lock, smc_disable_flags);	\
 	mask = SMC_GET_INT_MASK(lp);				\
 	mask &= ~(x);						\
 	SMC_SET_INT_MASK(lp, mask);				\
-	spin_unlock_irq(&lp->lock);				\
+	spin_unlock_irqrestore(&lp->lock, smc_disable_flags);	\
 } while (0)
 
 /*
@@ -520,21 +522,21 @@ static inline void smc_rcv(struct net_device *dev)
  * any other concurrent access and C would always interrupt B. But life
  * isn't that easy in a SMP world...
  */
-#define smc_special_trylock(lock)				\
+#define smc_special_trylock(lock, flags)			\
 ({								\
 	int __ret;						\
-	local_irq_disable();					\
+	local_irq_save(flags);					\
 	__ret = spin_trylock(lock);				\
 	if (!__ret)						\
-		local_irq_enable();				\
+		local_irq_restore(flags);			\
 	__ret;							\
 })
-#define smc_special_lock(lock)		spin_lock_irq(lock)
-#define smc_special_unlock(lock)	spin_unlock_irq(lock)
+#define smc_special_lock(lock, flags)	spin_lock_irqsave(lock, flags)
+#define smc_special_unlock(lock, flags)	spin_unlock_irqrestore(lock, flags)
 #else
-#define smc_special_trylock(lock)	(1)
-#define smc_special_lock(lock)		do { } while (0)
-#define smc_special_unlock(lock)	do { } while (0)
+#define smc_special_trylock(lock, flags)	(1)
+#define smc_special_lock(lock, flags)		do { } while (0)
+#define smc_special_unlock(lock, flags)		do { } while (0)
 #endif
 
 /*
@@ -548,10 +550,11 @@ static void smc_hardware_send_pkt(unsigned long data)
 	struct sk_buff *skb;
 	unsigned int packet_no, len;
 	unsigned char *buf;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
-	if (!smc_special_trylock(&lp->lock)) {
+	if (!smc_special_trylock(&lp->lock, flags)) {
 		netif_stop_queue(dev);
 		tasklet_schedule(&lp->tx_task);
 		return;
@@ -559,7 +562,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	skb = lp->pending_tx_skb;
 	if (unlikely(!skb)) {
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		return;
 	}
 	lp->pending_tx_skb = NULL;
@@ -569,7 +572,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 		printk("%s: Memory allocation failed.\n", dev->name);
 		dev->stats.tx_errors++;
 		dev->stats.tx_fifo_errors++;
-		smc_special_unlock(&lp->lock);
+		smc_special_unlock(&lp->lock, flags);
 		goto done;
 	}
 
@@ -608,7 +611,7 @@ static void smc_hardware_send_pkt(unsigned long data)
 
 	/* queue the packet for TX */
 	SMC_SET_MMU_CMD(lp, MC_ENQUEUE);
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	dev->trans_start = jiffies;
 	dev->stats.tx_packets++;
@@ -633,6 +636,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct smc_local *lp = netdev_priv(dev);
 	void __iomem *ioaddr = lp->base;
 	unsigned int numPages, poll_count, status;
+	unsigned long flags;
 
 	DBG(3, "%s: %s\n", dev->name, __func__);
 
@@ -658,7 +662,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		return NETDEV_TX_OK;
 	}
 
-	smc_special_lock(&lp->lock);
+	smc_special_lock(&lp->lock, flags);
 
 	/* now, try to allocate the memory */
 	SMC_SET_MMU_CMD(lp, MC_ALLOC | numPages);
@@ -676,7 +680,7 @@ static int smc_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		}
 	} while (--poll_count);
 
-	smc_special_unlock(&lp->lock);
+	smc_special_unlock(&lp->lock, flags);
 
 	lp->pending_tx_skb = skb;
 	if (!poll_count) {
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 2a01f0e421ab..b89b73c0b30b 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -653,8 +653,9 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	int entry;
 	u32 flag;
 	dma_addr_t mapping;
+	unsigned long flags;
 
-	spin_lock_irq(&tp->lock);
+	spin_lock_irqsave(&tp->lock, flags);
 
 	/* Calculate the next Tx descriptor entry. */
 	entry = tp->cur_tx % TX_RING_SIZE;
@@ -689,7 +690,7 @@ tulip_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Trigger an immediate transmit demand. */
 	iowrite32(0, tp->base_addr + CSR1);
 
-	spin_unlock_irq(&tp->lock);
+	spin_unlock_irqrestore(&tp->lock, flags);
 
 	dev->trans_start = jiffies;
 
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index 7fb96f33bade..3b647d07e410 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -3084,10 +3084,11 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	u8 __iomem *bd;			/* BD pointer */
 	u32 bd_status;
 	u8 txQ = 0;
+	unsigned long flags;
 
 	ugeth_vdbg("%s: IN", __func__);
 
-	spin_lock_irq(&ugeth->lock);
+	spin_lock_irqsave(&ugeth->lock, flags);
 
 	dev->stats.tx_bytes += skb->len;
 
@@ -3144,7 +3145,7 @@ static int ucc_geth_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	uccf = ugeth->uccf;
 	out_be16(uccf->p_utodr, UCC_FAST_TOD);
 #endif
-	spin_unlock_irq(&ugeth->lock);
+	spin_unlock_irqrestore(&ugeth->lock, flags);
 
 	return NETDEV_TX_OK;
 }
diff --git a/drivers/net/usb/pegasus.h b/drivers/net/usb/pegasus.h
index c7467823cd1c..f968c834ff63 100644
--- a/drivers/net/usb/pegasus.h
+++ b/drivers/net/usb/pegasus.h
@@ -250,6 +250,8 @@ PEGASUS_DEV( "IO DATA USB ET/TX", VENDOR_IODATA, 0x0904,
 		DEFAULT_GPIO_RESET )
 PEGASUS_DEV( "IO DATA USB ET/TX-S", VENDOR_IODATA, 0x0913,
 		DEFAULT_GPIO_RESET | PEGASUS_II )
+PEGASUS_DEV( "IO DATA USB ETX-US2", VENDOR_IODATA, 0x092a,
+		DEFAULT_GPIO_RESET | PEGASUS_II )
 PEGASUS_DEV( "Kingston KNU101TX Ethernet", VENDOR_KINGSTON, 0x000a,
 		DEFAULT_GPIO_RESET)
 PEGASUS_DEV( "LANEED USB Ethernet LD-USB/TX", VENDOR_LANEED, 0x4002,
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 081402cb05fd..1fd70583be44 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1220,6 +1220,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 	struct rhine_private *rp = netdev_priv(dev);
 	void __iomem *ioaddr = rp->base;
 	unsigned entry;
+	unsigned long flags;
 
 	/* Caution: the write order is important here, set the field
 	   with the "ownership" bits last. */
@@ -1263,7 +1264,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
 
 	/* lock eth irq */
-	spin_lock_irq(&rp->lock);
+	spin_lock_irqsave(&rp->lock, flags);
 	wmb();
 	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
 	wmb();
@@ -1282,7 +1283,7 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
 
 	dev->trans_start = jiffies;
 
-	spin_unlock_irq(&rp->lock);
+	spin_unlock_irqrestore(&rp->lock, flags);
 
 	if (debug > 4) {
 		printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index e56cf6b548d6..ced1446dec04 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -1789,7 +1789,7 @@ static void velocity_error(struct velocity_info *vptr, int status)
 	 * mode
 	 */
 	if (vptr->rev_id < REV_ID_VT3216_A0) {
-		if (vptr->mii_status | VELOCITY_DUPLEX_FULL)
+		if (vptr->mii_status & VELOCITY_DUPLEX_FULL)
 			BYTE_REG_BITS_ON(TCR_TB2BDIS, &regs->TCR);
 		else
 			BYTE_REG_BITS_OFF(TCR_TB2BDIS, &regs->TCR);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 31279c4c02c2..51e9ce4907f0 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -70,6 +70,9 @@ struct virtnet_info
 	struct sk_buff_head recv;
 	struct sk_buff_head send;
 
+	/* Work struct for refilling if we run low on memory. */
+	struct delayed_work refill;
+
 	/* Chain pages by the private ptr. */
 	struct page *pages;
 };
@@ -273,19 +276,22 @@ drop:
 	dev_kfree_skb(skb);
 }
 
-static void try_fill_recv_maxbufs(struct virtnet_info *vi)
+static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[2+MAX_SKB_FRAGS];
 	int num, err, i;
+	bool oom = false;
 
 	sg_init_table(sg, 2+MAX_SKB_FRAGS);
 	for (;;) {
 		struct virtio_net_hdr *hdr;
 
 		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 		skb_put(skb, MAX_PACKET_LEN);
@@ -296,7 +302,7 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
 		if (vi->big_packets) {
 			for (i = 0; i < MAX_SKB_FRAGS; i++) {
 				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-				f->page = get_a_page(vi, GFP_ATOMIC);
+				f->page = get_a_page(vi, gfp);
 				if (!f->page)
 					break;
 
@@ -325,31 +331,35 @@ static void try_fill_recv_maxbufs(struct virtnet_info *vi)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
-static void try_fill_recv(struct virtnet_info *vi)
+/* Returns false if we couldn't fill entirely (OOM). */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 {
 	struct sk_buff *skb;
 	struct scatterlist sg[1];
 	int err;
+	bool oom = false;
 
-	if (!vi->mergeable_rx_bufs) {
-		try_fill_recv_maxbufs(vi);
-		return;
-	}
+	if (!vi->mergeable_rx_bufs)
+		return try_fill_recv_maxbufs(vi, gfp);
 
 	for (;;) {
 		skb_frag_t *f;
 
 		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
-		if (unlikely(!skb))
+		if (unlikely(!skb)) {
+			oom = true;
 			break;
+		}
 
 		skb_reserve(skb, NET_IP_ALIGN);
 
 		f = &skb_shinfo(skb)->frags[0];
-		f->page = get_a_page(vi, GFP_ATOMIC);
+		f->page = get_a_page(vi, gfp);
 		if (!f->page) {
+			oom = true;
 			kfree_skb(skb);
 			break;
 		}
@@ -373,6 +383,7 @@ static void try_fill_recv(struct virtnet_info *vi)
 	if (unlikely(vi->num > vi->max))
 		vi->max = vi->num;
 	vi->rvq->vq_ops->kick(vi->rvq);
+	return !oom;
 }
 
 static int virtnet_poll(struct napi_struct *napi, int budget)
@@ -385,6 +396,23 @@ static void skb_recv_done(struct virtqueue *rvq)
 	}
 }
 
+static void refill_work(struct work_struct *work)
+{
+	struct virtnet_info *vi;
+	bool still_empty;
+
+	vi = container_of(work, struct virtnet_info, refill.work);
+	napi_disable(&vi->napi);
+	try_fill_recv(vi, GFP_KERNEL);
+	still_empty = (vi->num == 0);
+	napi_enable(&vi->napi);
+
+	/* In theory, this can happen: if we don't get any buffers in
+	 * we will *never* try to fill again. */
+	if (still_empty)
+		schedule_delayed_work(&vi->refill, HZ/2);
+}
+
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
 	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
@@ -400,10 +428,10 @@ again:
 		received++;
 	}
 
-	/* FIXME: If we oom and completely run out of inbufs, we need
-	 * to start a timer trying to fill more. */
-	if (vi->num < vi->max / 2)
-		try_fill_recv(vi);
+	if (vi->num < vi->max / 2) {
+		if (!try_fill_recv(vi, GFP_ATOMIC))
+			schedule_delayed_work(&vi->refill, 0);
+	}
 
 	/* Out of packets? */
 	if (received < budget) {
@@ -894,6 +922,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	vi->vdev = vdev;
 	vdev->priv = vi;
 	vi->pages = NULL;
+	INIT_DELAYED_WORK(&vi->refill, refill_work);
 
 	/* If they give us a callback when all buffers are done, we don't need
 	 * the timer. */
@@ -942,7 +971,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 	}
 
 	/* Last of all, set up some receive buffers. */
-	try_fill_recv(vi);
+	try_fill_recv(vi, GFP_KERNEL);
 
 	/* If we didn't even get one input buffer, we're useless. */
 	if (vi->num == 0) {
@@ -959,6 +988,7 @@ static int virtnet_probe(struct virtio_device *vdev)
 
 unregister:
 	unregister_netdev(dev);
+	cancel_delayed_work_sync(&vi->refill);
 free_vqs:
 	vdev->config->del_vqs(vdev);
 free:
@@ -987,6 +1017,7 @@ static void virtnet_remove(struct virtio_device *vdev)
 	BUG_ON(vi->num != 0);
 
 	unregister_netdev(vi->dev);
+	cancel_delayed_work_sync(&vi->refill);
 
 	vdev->config->del_vqs(vi->vdev);
 
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 3838f9f9a47a..8d58e6ed4e7d 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -2893,45 +2893,27 @@ static int ipw_fw_dma_add_command_block(struct ipw_priv *priv,
 	return 0;
 }
 
-static int ipw_fw_dma_add_buffer(struct ipw_priv *priv,
-				 u32 src_phys, u32 dest_address, u32 length)
+static int ipw_fw_dma_add_buffer(struct ipw_priv *priv, dma_addr_t *src_address,
+				 int nr, u32 dest_address, u32 len)
 {
-	u32 bytes_left = length;
-	u32 src_offset = 0;
-	u32 dest_offset = 0;
-	int status = 0;
+	int ret, i;
+	u32 size;
+
 	IPW_DEBUG_FW(">> \n");
-	IPW_DEBUG_FW_INFO("src_phys=0x%x dest_address=0x%x length=0x%x\n",
-			  src_phys, dest_address, length);
-	while (bytes_left > CB_MAX_LENGTH) {
-		status = ipw_fw_dma_add_command_block(priv,
-						      src_phys + src_offset,
-						      dest_address +
-						      dest_offset,
-						      CB_MAX_LENGTH, 0, 0);
-		if (status) {
+	IPW_DEBUG_FW_INFO("nr=%d dest_address=0x%x len=0x%x\n",
+			  nr, dest_address, len);
+
+	for (i = 0; i < nr; i++) {
+		size = min_t(u32, len - i * CB_MAX_LENGTH, CB_MAX_LENGTH);
+		ret = ipw_fw_dma_add_command_block(priv, src_address[i],
+						   dest_address +
+						   i * CB_MAX_LENGTH, size,
+						   0, 0);
+		if (ret) {
 			IPW_DEBUG_FW_INFO(": Failed\n");
 			return -1;
 		} else
 			IPW_DEBUG_FW_INFO(": Added new cb\n");
-
-		src_offset += CB_MAX_LENGTH;
-		dest_offset += CB_MAX_LENGTH;
-		bytes_left -= CB_MAX_LENGTH;
-	}
-
-	/* add the buffer tail */
-	if (bytes_left > 0) {
-		status =
-		    ipw_fw_dma_add_command_block(priv, src_phys + src_offset,
-						 dest_address + dest_offset,
-						 bytes_left, 0, 0);
-		if (status) {
-			IPW_DEBUG_FW_INFO(": Failed on the buffer tail\n");
-			return -1;
-		} else
-			IPW_DEBUG_FW_INFO
-			    (": Adding new cb - the buffer tail\n");
 	}
 
 	IPW_DEBUG_FW("<< \n");
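
The rewrite above folds the old while-loop-plus-tail special case into one for loop: block i starts at byte i * CB_MAX_LENGTH and min_t() shortens only the last block. The arithmetic in isolation (a sketch, not driver code; min_t comes from <linux/kernel.h>):

	/* size of block i when len bytes are cut into blk-byte pieces;
	 * every block is full-sized except a possibly short final one */
	static u32 block_size(u32 len, u32 blk, u32 i)
	{
		return min_t(u32, len - i * blk, blk);
	}

	/* number of blocks needed, rounding up */
	static u32 block_count(u32 len, u32 blk)
	{
		return (len + blk - 1) / blk;
	}
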
@@ -3179,59 +3161,91 @@ static int ipw_load_ucode(struct ipw_priv *priv, u8 * data, size_t len)
 
 static int ipw_load_firmware(struct ipw_priv *priv, u8 * data, size_t len)
 {
-	int rc = -1;
+	int ret = -1;
 	int offset = 0;
 	struct fw_chunk *chunk;
-	dma_addr_t shared_phys;
-	u8 *shared_virt;
+	int total_nr = 0;
+	int i;
+	struct pci_pool *pool;
+	u32 *virts[CB_NUMBER_OF_ELEMENTS_SMALL];
+	dma_addr_t phys[CB_NUMBER_OF_ELEMENTS_SMALL];
 
 	IPW_DEBUG_TRACE("<< : \n");
-	shared_virt = pci_alloc_consistent(priv->pci_dev, len, &shared_phys);
 
-	if (!shared_virt)
+	pool = pci_pool_create("ipw2200", priv->pci_dev, CB_MAX_LENGTH, 0, 0);
+	if (!pool) {
+		IPW_ERROR("pci_pool_create failed\n");
 		return -ENOMEM;
-
-	memmove(shared_virt, data, len);
+	}
 
 	/* Start the Dma */
-	rc = ipw_fw_dma_enable(priv);
+	ret = ipw_fw_dma_enable(priv);
 
 	/* the DMA is already ready this would be a bug. */
 	BUG_ON(priv->sram_desc.last_cb_index > 0);
 
 	do {
+		u32 chunk_len;
+		u8 *start;
+		int size;
+		int nr = 0;
+
 		chunk = (struct fw_chunk *)(data + offset);
 		offset += sizeof(struct fw_chunk);
+		chunk_len = le32_to_cpu(chunk->length);
+		start = data + offset;
+
+		nr = (chunk_len + CB_MAX_LENGTH - 1) / CB_MAX_LENGTH;
+		for (i = 0; i < nr; i++) {
+			virts[total_nr] = pci_pool_alloc(pool, GFP_KERNEL,
+							 &phys[total_nr]);
+			if (!virts[total_nr]) {
+				ret = -ENOMEM;
+				goto out;
+			}
+			size = min_t(u32, chunk_len - i * CB_MAX_LENGTH,
+				     CB_MAX_LENGTH);
+			memcpy(virts[total_nr], start, size);
+			start += size;
+			total_nr++;
+			/* We don't support fw chunk larger than 64*8K */
+			BUG_ON(total_nr > CB_NUMBER_OF_ELEMENTS_SMALL);
+		}
+
 		/* build DMA packet and queue up for sending */
 		/* dma to chunk->address, the chunk->length bytes from data +
 		 * offeset*/
 		/* Dma loading */
-		rc = ipw_fw_dma_add_buffer(priv, shared_phys + offset,
-					   le32_to_cpu(chunk->address),
-					   le32_to_cpu(chunk->length));
-		if (rc) {
+		ret = ipw_fw_dma_add_buffer(priv, &phys[total_nr - nr],
+					    nr, le32_to_cpu(chunk->address),
+					    chunk_len);
+		if (ret) {
 			IPW_DEBUG_INFO("dmaAddBuffer Failed\n");
 			goto out;
 		}
 
-		offset += le32_to_cpu(chunk->length);
+		offset += chunk_len;
 	} while (offset < len);
 
 	/* Run the DMA and wait for the answer */
-	rc = ipw_fw_dma_kick(priv);
-	if (rc) {
+	ret = ipw_fw_dma_kick(priv);
+	if (ret) {
 		IPW_ERROR("dmaKick Failed\n");
 		goto out;
 	}
 
-	rc = ipw_fw_dma_wait(priv);
-	if (rc) {
+	ret = ipw_fw_dma_wait(priv);
+	if (ret) {
 		IPW_ERROR("dmaWaitSync Failed\n");
 		goto out;
 	}
  out:
-	pci_free_consistent(priv->pci_dev, len, shared_virt, shared_phys);
-	return rc;
+	for (i = 0; i < total_nr; i++)
+		pci_pool_free(pool, virts[i], phys[i]);
+
+	pci_pool_destroy(pool);
+
+	return ret;
 }
 
 /* stop nic */
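
The loader used to copy the whole firmware image into a single pci_alloc_consistent() buffer, one large physically contiguous DMA allocation that can fail on a fragmented system; the rewrite draws fixed-size CB_MAX_LENGTH blocks from a pci_pool and releases them all on the out: path. The pool lifecycle the new code relies on, shown in isolation (2.6.31-era API, later renamed dma_pool; the demo names are hypothetical):

	#include <linux/pci.h>

	static int demo_pool_cycle(struct pci_dev *pdev)
	{
		struct pci_pool *pool;
		dma_addr_t handle;
		void *block;

		/* every allocation from a pool has the same size */
		pool = pci_pool_create("demo", pdev, 8192, 0, 0);
		if (!pool)
			return -ENOMEM;

		block = pci_pool_alloc(pool, GFP_KERNEL, &handle);
		if (!block) {
			pci_pool_destroy(pool);
			return -ENOMEM;
		}

		pci_pool_free(pool, block, handle);
		/* all blocks must be freed before the pool is destroyed */
		pci_pool_destroy(pool);
		return 0;
	}
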
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 40d8dfa7eace..359652d35e63 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -644,7 +644,7 @@ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc)
 	int err = 0;
 	u8 tsc_arr[4][ORINOCO_SEQ_LEN];
 
-	if ((key < 0) || (key > 4))
+	if ((key < 0) || (key >= 4))
 		return -EINVAL;
 
 	err = hermes_read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV,
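
The old bound let key == 4 through, indexing one element past the end of the four-entry tsc_arr; key >= 4 limits the index to 0..3. Written with ARRAY_SIZE() the bound would document itself (an equivalent sketch, not what the patch does):

	if (key < 0 || key >= ARRAY_SIZE(tsc_arr))
		return -EINVAL;
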
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 9679b29e1c49..2017ccc00145 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -871,6 +871,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
 	priv->aifsn[3] = 3; /* AIFSN[AC_BE] */
 	rtl818x_iowrite8(priv, &priv->map->ACM_CONTROL, 0);
 
+	/* ENEDCA flag must always be set, transmit issues? */
+	rtl818x_iowrite8(priv, &priv->map->MSR, RTL818X_MSR_ENEDCA);
+
 	return 0;
 }
 
@@ -1176,13 +1179,16 @@ static void rtl8187_bss_info_changed(struct ieee80211_hw *dev,
 		rtl818x_iowrite8(priv, &priv->map->BSSID[i],
 				 info->bssid[i]);
 
+	if (priv->is_rtl8187b)
+		reg = RTL818X_MSR_ENEDCA;
+	else
+		reg = 0;
+
 	if (is_valid_ether_addr(info->bssid)) {
-		reg = RTL818X_MSR_INFRA;
-		if (priv->is_rtl8187b)
-			reg |= RTL818X_MSR_ENEDCA;
+		reg |= RTL818X_MSR_INFRA;
 		rtl818x_iowrite8(priv, &priv->map->MSR, reg);
 	} else {
-		reg = RTL818X_MSR_NO_LINK;
+		reg |= RTL818X_MSR_NO_LINK;
 		rtl818x_iowrite8(priv, &priv->map->MSR, reg);
 	}
 
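
Both rtl8187 hunks serve the same point: on RTL8187B hardware the ENEDCA bit in MSR must stay set, so the BSS handler now seeds reg with it before branching and ORs in the link state, rather than rebuilding reg from scratch in each branch. Reduced to its shape (link_up is a stand-in for the is_valid_ether_addr() test, not a variable in the driver):

	reg = priv->is_rtl8187b ? RTL818X_MSR_ENEDCA : 0;
	reg |= link_up ? RTL818X_MSR_INFRA : RTL818X_MSR_NO_LINK;
	rtl818x_iowrite8(priv, &priv->map->MSR, reg);
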
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index 9509477f61f4..4987040c414b 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -346,7 +346,7 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
 static int yellowfin_open(struct net_device *dev);
 static void yellowfin_timer(unsigned long data);
 static void yellowfin_tx_timeout(struct net_device *dev);
-static void yellowfin_init_ring(struct net_device *dev);
+static int yellowfin_init_ring(struct net_device *dev);
 static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,
 					struct net_device *dev);
 static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance);
@@ -574,19 +574,24 @@ static int yellowfin_open(struct net_device *dev)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
 	void __iomem *ioaddr = yp->base;
-	int i;
+	int i, ret;
 
 	/* Reset the chip. */
 	iowrite32(0x80000000, ioaddr + DMACtrl);
 
-	i = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
-	if (i) return i;
+	ret = request_irq(dev->irq, &yellowfin_interrupt, IRQF_SHARED, dev->name, dev);
+	if (ret)
+		return ret;
 
 	if (yellowfin_debug > 1)
 		printk(KERN_DEBUG "%s: yellowfin_open() irq %d.\n",
 		       dev->name, dev->irq);
 
-	yellowfin_init_ring(dev);
+	ret = yellowfin_init_ring(dev);
+	if (ret) {
+		free_irq(dev->irq, dev);
+		return ret;
+	}
 
 	iowrite32(yp->rx_ring_dma, ioaddr + RxPtr);
 	iowrite32(yp->tx_ring_dma, ioaddr + TxPtr);
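
yellowfin_open() now unwinds in reverse order of acquisition: once request_irq() has succeeded, a later failure in yellowfin_init_ring() must free the IRQ before returning, otherwise the handler would outlive the rest of the setup. The same pattern in miniature (the demo_* helpers are hypothetical):

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	static irqreturn_t demo_interrupt(int irq, void *dev_id);	/* hypothetical */
	static int demo_init_ring(struct net_device *dev);		/* hypothetical */

	static int demo_open(struct net_device *dev)
	{
		int ret;

		ret = request_irq(dev->irq, demo_interrupt, IRQF_SHARED,
				  dev->name, dev);
		if (ret)
			return ret;

		ret = demo_init_ring(dev);
		if (ret) {
			free_irq(dev->irq, dev);	/* undo step 1 */
			return ret;
		}
		return 0;
	}
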
@@ -726,10 +731,10 @@ static void yellowfin_tx_timeout(struct net_device *dev)
 }
 
 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
-static void yellowfin_init_ring(struct net_device *dev)
+static int yellowfin_init_ring(struct net_device *dev)
 {
 	struct yellowfin_private *yp = netdev_priv(dev);
-	int i;
+	int i, j;
 
 	yp->tx_full = 0;
 	yp->cur_rx = yp->cur_tx = 0;
@@ -754,6 +759,11 @@ static void yellowfin_init_ring(struct net_device *dev)
 		yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev,
 			skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE));
 	}
+	if (i != RX_RING_SIZE) {
+		for (j = 0; j < i; j++)
+			dev_kfree_skb(yp->rx_skbuff[j]);
+		return -ENOMEM;
+	}
 	yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP);
 	yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
 
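
The added block turns a silent partial initialisation into a hard failure: if the skb allocation loop exited early, the i buffers that did get allocated are released before -ENOMEM is reported, so nothing leaks. The generic rollback shape, with hypothetical alloc_thing()/free_thing():

	for (i = 0; i < n; i++) {
		buf[i] = alloc_thing();
		if (!buf[i])
			break;
	}
	if (i != n) {
		/* partial success: free what was allocated, then fail */
		while (i-- > 0)
			free_thing(buf[i]);
		return -ENOMEM;
	}
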
@@ -770,8 +780,6 @@ static void yellowfin_init_ring(struct net_device *dev)
 	yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS);
 #else
 {
-	int j;
-
 	/* Tx ring needs a pair of descriptors, the second for the status. */
 	for (i = 0; i < TX_RING_SIZE; i++) {
 		j = 2*i;
@@ -806,7 +814,7 @@ static void yellowfin_init_ring(struct net_device *dev)
 }
 #endif
 	yp->tx_tail_desc = &yp->tx_status[0];
-	return;
+	return 0;
 }
 
 static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb,