author		David S. Miller <davem@davemloft.net>	2013-02-05 14:12:20 -0500
committer	David S. Miller <davem@davemloft.net>	2013-02-05 14:12:20 -0500
commit		188d1f76d0dd3715ceeadfa31376867c3395eb41 (patch)
tree		b8976427ec21d3c346f2a993160b368c620c249a
parent		577ae39ddb037242964f5fe87fd50b0b89e3263b (diff)
parent		bf414b369f158bb527f9f29174ada815f961b44c (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/wireless/iwlwifi/dvm/tx.c
net/ipv6/route.c
The ipv6 route.c conflict is simple: just ignore the 'net' side change,
as we fixed the same problem in 'net-next' by eliminating cached
neighbours from ipv6 routes.
The e1000e conflict is the addition of a new statistic in the ethtool
code, and is trivial.
The vmxnet3 conflict is about one change in 'net' removing a guarding
conditional, whilst in 'net-next' we had a netdev_info() conversion.
The iwlwifi conflict deals with a WARN_ON() conversion in 'net-next'
vs. a revert happening in 'net'.
Signed-off-by: David S. Miller <davem@davemloft.net>
38 files changed, 291 insertions, 166 deletions
diff --git a/drivers/bcma/driver_chipcommon_nflash.c b/drivers/bcma/driver_chipcommon_nflash.c
index dbda91e4dff5..1f0b83e18f68 100644
--- a/drivers/bcma/driver_chipcommon_nflash.c
+++ b/drivers/bcma/driver_chipcommon_nflash.c
@@ -21,7 +21,7 @@ int bcma_nflash_init(struct bcma_drv_cc *cc)
 	struct bcma_bus *bus = cc->core->bus;
 
 	if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4706 &&
-	    cc->core->id.rev != 0x38) {
+	    cc->core->id.rev != 38) {
 		bcma_err(bus, "NAND flash on unsupported board!\n");
 		return -ENOTSUPP;
 	}
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index 1877ed7ca086..1c9e09fbdff8 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1053,6 +1053,7 @@ static ssize_t bonding_store_primary(struct device *d,
 			pr_info("%s: Setting primary slave to None.\n",
 				bond->dev->name);
 			bond->primary_slave = NULL;
+			memset(bond->params.primary, 0, sizeof(bond->params.primary));
 			bond_select_active_slave(bond);
 			goto out;
 		}
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 285f763e3cd1..a668cd491cb3 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -491,8 +491,12 @@ static void c_can_setup_receive_object(struct net_device *dev, int iface,
 
 	priv->write_reg(priv, C_CAN_IFACE(MASK1_REG, iface),
 			IFX_WRITE_LOW_16BIT(mask));
+
+	/* According to C_CAN documentation, the reserved bit
+	 * in IFx_MASK2 register is fixed 1
+	 */
 	priv->write_reg(priv, C_CAN_IFACE(MASK2_REG, iface),
-			IFX_WRITE_HIGH_16BIT(mask));
+			IFX_WRITE_HIGH_16BIT(mask) | BIT(13));
 
 	priv->write_reg(priv, C_CAN_IFACE(ARB1_REG, iface),
 			IFX_WRITE_LOW_16BIT(id));
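For illustration, a small user-space sketch of how a 32-bit acceptance mask ends up split across the two 16-bit IFx mask registers, with the reserved bit forced to 1 in the upper half. The helper macros below are re-declared assumptions modelled on the driver's IFX_WRITE_* helpers, not the driver code itself:

#include <stdint.h>
#include <stdio.h>

/* assumed equivalents of the driver's helpers */
#define IFX_WRITE_LOW_16BIT(x)  ((x) & 0xFFFF)          /* bits 15..0  -> IFx_MASK1 */
#define IFX_WRITE_HIGH_16BIT(x) (((x) >> 16) & 0xFFFF)  /* bits 31..16 -> IFx_MASK2 */
#define BIT(n)                  (1U << (n))

int main(void)
{
	uint32_t mask = 0x1FFFFFFF; /* example 29-bit CAN ID mask */
	uint16_t mask1 = IFX_WRITE_LOW_16BIT(mask);
	uint16_t mask2 = IFX_WRITE_HIGH_16BIT(mask) | BIT(13); /* reserved bit fixed to 1 */

	printf("MASK1=0x%04x MASK2=0x%04x\n", mask1, mask2);
	return 0;
}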
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 4010cb71bddb..28ceb8414185 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -36,13 +36,13 @@
 
 #define DRV_VER "4.6.62.0u"
 #define DRV_NAME "be2net"
-#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
-#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
-#define OC_NAME "Emulex OneConnect 10Gbps NIC"
+#define BE_NAME "Emulex BladeEngine2"
+#define BE3_NAME "Emulex BladeEngine3"
+#define OC_NAME "Emulex OneConnect"
 #define OC_NAME_BE OC_NAME "(be3)"
 #define OC_NAME_LANCER OC_NAME "(Lancer)"
 #define OC_NAME_SH OC_NAME "(Skyhawk)"
-#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
+#define DRV_DESC "Emulex OneConnect 10Gbps NIC Driver"
 
 #define BE_VENDOR_ID 0x19a2
 #define EMULEX_VENDOR_ID 0x10df
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 7d534818d2fb..3860888ac711 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -25,7 +25,7 @@
 MODULE_VERSION(DRV_VER);
 MODULE_DEVICE_TABLE(pci, be_dev_ids);
 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
-MODULE_AUTHOR("ServerEngines Corporation");
+MODULE_AUTHOR("Emulex Corporation");
 MODULE_LICENSE("GPL");
 
 static unsigned int num_vfs;
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ec4a5e1c6fb2..185c721c52d7 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -1812,7 +1812,7 @@ static void rhine_tx(struct net_device *dev)
 					 rp->tx_skbuff[entry]->len,
 					 PCI_DMA_TODEVICE);
 		}
-		dev_kfree_skb_irq(rp->tx_skbuff[entry]);
+		dev_kfree_skb(rp->tx_skbuff[entry]);
 		rp->tx_skbuff[entry] = NULL;
 		entry = (++rp->dirty_tx) % TX_RING_SIZE;
 	}
@@ -2024,11 +2024,7 @@ static void rhine_slow_event_task(struct work_struct *work)
 	if (intr_status & IntrPCIErr)
 		netif_warn(rp, hw, dev, "PCI error\n");
 
-	napi_disable(&rp->napi);
-	rhine_irq_disable(rp);
-	/* Slow and safe. Consider __napi_schedule as a replacement ? */
-	napi_enable(&rp->napi);
-	napi_schedule(&rp->napi);
+	iowrite16(RHINE_EVENT & 0xffff, rp->base + IntrEnable);
 
 out_unlock:
 	mutex_unlock(&rp->task_lock);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8d208dd92963..b1038c0e2240 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -298,11 +298,12 @@ static void tun_flow_cleanup(unsigned long data)
 }
 
 static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
-			    u16 queue_index)
+			    struct tun_file *tfile)
 {
 	struct hlist_head *head;
 	struct tun_flow_entry *e;
 	unsigned long delay = tun->ageing_time;
+	u16 queue_index = tfile->queue_index;
 
 	if (!rxhash)
 		return;
@@ -311,7 +312,9 @@ static void tun_flow_update(struct tun_struct *tun, u32 rxhash,
 
 	rcu_read_lock();
 
-	if (tun->numqueues == 1)
+	/* We may get a very small possibility of OOO during switching, not
+	 * worth to optimize.*/
+	if (tun->numqueues == 1 || tfile->detached)
 		goto unlock;
 
 	e = tun_flow_find(head, rxhash);
@@ -411,21 +414,21 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 
 	tun = rtnl_dereference(tfile->tun);
 
-	if (tun) {
+	if (tun && !tfile->detached) {
 		u16 index = tfile->queue_index;
 		BUG_ON(index >= tun->numqueues);
 		dev = tun->dev;
 
 		rcu_assign_pointer(tun->tfiles[index],
 				   tun->tfiles[tun->numqueues - 1]);
-		rcu_assign_pointer(tfile->tun, NULL);
 		ntfile = rtnl_dereference(tun->tfiles[index]);
 		ntfile->queue_index = index;
 
 		--tun->numqueues;
-		if (clean)
+		if (clean) {
+			rcu_assign_pointer(tfile->tun, NULL);
 			sock_put(&tfile->sk);
-		else
+		} else
 			tun_disable_queue(tun, tfile);
 
 		synchronize_net();
@@ -439,10 +442,13 @@ static void __tun_detach(struct tun_file *tfile, bool clean)
 	}
 
 	if (clean) {
-		if (tun && tun->numqueues == 0 && tun->numdisabled == 0 &&
-		    !(tun->flags & TUN_PERSIST))
-			if (tun->dev->reg_state == NETREG_REGISTERED)
+		if (tun && tun->numqueues == 0 && tun->numdisabled == 0) {
+			netif_carrier_off(tun->dev);
+
+			if (!(tun->flags & TUN_PERSIST) &&
+			    tun->dev->reg_state == NETREG_REGISTERED)
 				unregister_netdevice(tun->dev);
+		}
 
 		BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED,
 				 &tfile->socket.flags));
@@ -470,6 +476,10 @@ static void tun_detach_all(struct net_device *dev)
 		rcu_assign_pointer(tfile->tun, NULL);
 		--tun->numqueues;
 	}
+	list_for_each_entry(tfile, &tun->disabled, next) {
+		wake_up_all(&tfile->wq.wait);
+		rcu_assign_pointer(tfile->tun, NULL);
+	}
 	BUG_ON(tun->numqueues != 0);
 
 	synchronize_net();
@@ -500,7 +510,7 @@ static int tun_attach(struct tun_struct *tun, struct file *file)
 		goto out;
 
 	err = -EINVAL;
-	if (rtnl_dereference(tfile->tun))
+	if (rtnl_dereference(tfile->tun) && !tfile->detached)
 		goto out;
 
 	err = -EBUSY;
@@ -1203,7 +1213,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	tun->dev->stats.rx_packets++;
 	tun->dev->stats.rx_bytes += len;
 
-	tun_flow_update(tun, rxhash, tfile->queue_index);
+	tun_flow_update(tun, rxhash, tfile);
 	return total_len;
 }
 
@@ -1662,10 +1672,10 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
 		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
 		    device_create_file(&tun->dev->dev, &dev_attr_group))
 			pr_err("Failed to create tun sysfs files\n");
-
-		netif_carrier_on(tun->dev);
 	}
 
+	netif_carrier_on(tun->dev);
+
 	tun_debug(KERN_INFO, tun, "tun_set_iff\n");
 
 	if (ifr->ifr_flags & IFF_NO_PI)
@@ -1817,7 +1827,7 @@ static int tun_set_queue(struct file *file, struct ifreq *ifr)
 		ret = tun_attach(tun, file);
 	} else if (ifr->ifr_flags & IFF_DETACH_QUEUE) {
 		tun = rtnl_dereference(tfile->tun);
-		if (!tun || !(tun->flags & TUN_TAP_MQ))
+		if (!tun || !(tun->flags & TUN_TAP_MQ) || tfile->detached)
 			ret = -EINVAL;
 		else
 			__tun_detach(tfile, false);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 575a5839ee34..2ca7f8ea2dca 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -461,6 +461,7 @@ static const struct usb_device_id products[] = {
 	{QMI_FIXED_INTF(0x1199, 0x901c, 8)},	/* Sierra Wireless EM7700 */
 	{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},	/* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
 	{QMI_FIXED_INTF(0x2357, 0x0201, 4)},	/* TP-LINK HSUPA Modem MA180 */
+	{QMI_FIXED_INTF(0x1bc7, 0x1200, 5)},	/* Telit LE920 */
 
 	/* 4. Gobi 1000 devices */
 	{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)},	/* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 30c1b330e983..51f3192f3931 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -380,6 +380,12 @@ static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
 	unsigned long lockflags;
 	size_t size = dev->rx_urb_size;
 
+	/* prevent rx skb allocation when error ratio is high */
+	if (test_bit(EVENT_RX_KILL, &dev->flags)) {
+		usb_free_urb(urb);
+		return -ENOLINK;
+	}
+
 	skb = __netdev_alloc_skb_ip_align(dev->net, size, flags);
 	if (!skb) {
 		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
@@ -539,6 +545,17 @@ block:
 		break;
 	}
 
+	/* stop rx if packet error rate is high */
+	if (++dev->pkt_cnt > 30) {
+		dev->pkt_cnt = 0;
+		dev->pkt_err = 0;
+	} else {
+		if (state == rx_cleanup)
+			dev->pkt_err++;
+		if (dev->pkt_err > 20)
+			set_bit(EVENT_RX_KILL, &dev->flags);
+	}
+
 	state = defer_bh(dev, skb, &dev->rxq, state);
 
 	if (urb) {
@@ -791,6 +808,11 @@ int usbnet_open (struct net_device *net)
 		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
 		   "simple");
 
+	/* reset rx error state */
+	dev->pkt_cnt = 0;
+	dev->pkt_err = 0;
+	clear_bit(EVENT_RX_KILL, &dev->flags);
+
 	// delay posting reads until we're fully open
 	tasklet_schedule (&dev->bh);
 	if (info->manage_power) {
@@ -1103,13 +1125,11 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
 	if (info->tx_fixup) {
 		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
 		if (!skb) {
-			if (netif_msg_tx_err(dev)) {
-				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
-				goto drop;
-			} else {
-				/* cdc_ncm collected packet; waits for more */
+			/* packet collected; minidriver waiting for more */
+			if (info->flags & FLAG_MULTI_PACKET)
 				goto not_drop;
-			}
+			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
+			goto drop;
 		}
 	}
 	length = skb->len;
@@ -1254,6 +1274,9 @@ static void usbnet_bh (unsigned long param)
 		}
 	}
 
+	/* restart RX again after disabling due to high error rate */
+	clear_bit(EVENT_RX_KILL, &dev->flags);
+
 	// waiting for all pending urbs to complete?
 	if (dev->wait) {
 		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
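As a standalone illustration of the error-rate throttle added above (same 30-packet window and 20-error threshold), here is a hedged sketch using a hypothetical state struct rather than struct usbnet:

#include <stdbool.h>

/* hypothetical stand-in for the fields the patch adds to struct usbnet */
struct rx_throttle {
	unsigned char pkt_cnt; /* packets seen in the current window */
	unsigned char pkt_err; /* errors seen in the current window */
	bool rx_kill;          /* plays the role of EVENT_RX_KILL */
};

/* Called once per completed rx URB; 'error' mirrors "state == rx_cleanup". */
static void rx_throttle_update(struct rx_throttle *t, bool error)
{
	if (++t->pkt_cnt > 30) {
		/* new window: forget old counts */
		t->pkt_cnt = 0;
		t->pkt_err = 0;
	} else {
		if (error)
			t->pkt_err++;
		if (t->pkt_err > 20)
			t->rx_kill = true; /* stop submitting rx buffers */
	}
}

/* In the patch, rx_submit() refuses new buffers while the kill bit is set,
 * and usbnet_bh() clears it so reception restarts on the next pass. */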
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index b1c90f8ccd3d..ffb97b2a15a0 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -150,8 +150,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 	if (ret & 1) { /* Link is up. */
 		netdev_info(adapter->netdev, "NIC Link is Up %d Mbps\n",
 			    adapter->link_speed);
-		if (!netif_carrier_ok(adapter->netdev))
-			netif_carrier_on(adapter->netdev);
+		netif_carrier_on(adapter->netdev);
 
 		if (affectTxQueue) {
 			for (i = 0; i < adapter->num_tx_queues; i++)
@@ -160,8 +159,7 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
 		}
 	} else {
 		netdev_info(adapter->netdev, "NIC Link is Down\n");
-		if (netif_carrier_ok(adapter->netdev))
-			netif_carrier_off(adapter->netdev);
+		netif_carrier_off(adapter->netdev);
 
 		if (affectTxQueue) {
 			for (i = 0; i < adapter->num_tx_queues; i++)
@@ -3060,6 +3058,7 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
 	netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
 
+	netif_carrier_off(netdev);
 	err = register_netdev(netdev);
 
 	if (err) {
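The probe-path hunk follows the common pattern of marking the carrier off before register_netdev() so the stack never sees a stale link-up. A minimal sketch of that ordering with a hypothetical driver (error handling trimmed; this is not vmxnet3 code):

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/pci.h>

struct example_adapter { int dummy; }; /* hypothetical private data */

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *netdev;
	int err;

	netdev = alloc_etherdev(sizeof(struct example_adapter));
	if (!netdev)
		return -ENOMEM;

	/* ... hardware setup, netdev->netdev_ops assignment, etc. ... */

	netif_carrier_off(netdev); /* no link until the device reports one */
	err = register_netdev(netdev);
	if (err) {
		free_netdev(netdev);
		return err;
	}
	return 0;
}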
diff --git a/drivers/net/wireless/brcm80211/brcmsmac/main.c b/drivers/net/wireless/brcm80211/brcmsmac/main.c
index c26992a60e6c..8d560b64516d 100644
--- a/drivers/net/wireless/brcm80211/brcmsmac/main.c
+++ b/drivers/net/wireless/brcm80211/brcmsmac/main.c
@@ -1027,7 +1027,6 @@ brcms_c_dotxstatus(struct brcms_c_info *wlc, struct tx_status *txs)
 static bool
 brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 {
-	bool morepending = false;
 	struct bcma_device *core;
 	struct tx_status txstatus, *txs;
 	u32 s1, s2;
@@ -1041,23 +1040,20 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 	txs = &txstatus;
 	core = wlc_hw->d11core;
 	*fatal = false;
-	s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
-	while (!(*fatal)
-	       && (s1 & TXS_V)) {
-		/* !give others some time to run! */
-		if (n >= max_tx_num) {
-			morepending = true;
-			break;
-		}
 
+	while (n < max_tx_num) {
+		s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
 		if (s1 == 0xffffffff) {
 			brcms_err(core, "wl%d: %s: dead chip\n", wlc_hw->unit,
 				  __func__);
 			*fatal = true;
 			return false;
 		}
-		s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
+		/* only process when valid */
+		if (!(s1 & TXS_V))
+			break;
 
+		s2 = bcma_read32(core, D11REGOFFS(frmtxstatus2));
 		txs->status = s1 & TXS_STATUS_MASK;
 		txs->frameid = (s1 & TXS_FID_MASK) >> TXS_FID_SHIFT;
 		txs->sequence = s2 & TXS_SEQ_MASK;
@@ -1065,15 +1061,12 @@ brcms_b_txstatus(struct brcms_hardware *wlc_hw, bool bound, bool *fatal)
 		txs->lasttxtime = 0;
 
 		*fatal = brcms_c_dotxstatus(wlc_hw->wlc, txs);
-
-		s1 = bcma_read32(core, D11REGOFFS(frmtxstatus));
+		if (*fatal == true)
+			return false;
 		n++;
 	}
 
-	if (*fatal)
-		return false;
-
-	return morepending;
+	return n >= max_tx_num;
 }
 
 static void brcms_c_tbtt(struct brcms_c_info *wlc)
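The rewritten loop above is an instance of a common bounded-drain pattern: handle at most a fixed number of items per call and tell the caller whether more work may remain. A generic sketch of the same shape (plain C, nothing driver-specific assumed):

#include <stdbool.h>

/* Drain up to 'budget' items; return true if the caller should schedule
 * another pass because the budget was exhausted before the queue emptied. */
static bool drain_bounded(bool (*pop)(void *ctx, int *item), void *ctx,
			  void (*handle)(int item), int budget)
{
	int n = 0;
	int item;

	while (n < budget) {
		if (!pop(ctx, &item)) /* nothing valid left */
			break;
		handle(item);
		n++;
	}
	return n >= budget; /* budget used up: more may be pending */
}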
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index 6b01fc195940..191b9d4bee47 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1145,6 +1145,13 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 			next_reclaimed = ssn;
 		}
 
+		if (tid != IWL_TID_NON_QOS) {
+			priv->tid_data[sta_id][tid].next_reclaimed =
+				next_reclaimed;
+			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
+					   next_reclaimed);
+		}
+
 		iwl_trans_reclaim(priv->trans, txq_id, ssn, &skbs);
 
 		iwlagn_check_ratid_empty(priv, sta_id, tid);
@@ -1195,30 +1202,12 @@ int iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_cmd_buffer *rxb,
 			if (!is_agg)
 				iwlagn_non_agg_tx_status(priv, ctx, hdr->addr1);
 
-			/*
-			 * W/A for FW bug - the seq_ctl isn't updated when the
-			 * queues are flushed. Fetch it from the packet itself
-			 */
-			if (!is_agg && status == TX_STATUS_FAIL_FIFO_FLUSHED) {
-				next_reclaimed = le16_to_cpu(hdr->seq_ctrl);
-				next_reclaimed =
-					SEQ_TO_SN(next_reclaimed + 0x10);
-			}
-
 			is_offchannel_skb =
 				(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN);
 			freed++;
 		}
 
-		if (tid != IWL_TID_NON_QOS) {
-			priv->tid_data[sta_id][tid].next_reclaimed =
-				next_reclaimed;
-			IWL_DEBUG_TX_REPLY(priv, "Next reclaimed packet:%d\n",
-					   next_reclaimed);
-		}
-
-		if (!is_agg && freed != 1)
-			IWL_ERR(priv, "Q: %d, freed %d\n", txq_id, freed);
+		WARN_ON(!is_agg && freed != 1);
 
 		/*
 		 * An offchannel frame can be send only on the AUX queue, where
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index f0de40166dc3..d41f0e647280 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -1557,7 +1557,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 		dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
 			scan_rsp->number_of_sets);
 		ret = -1;
-		goto done;
+		goto check_next_scan;
 	}
 
 	bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
@@ -1628,7 +1628,8 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 		if (!beacon_size || beacon_size > bytes_left) {
 			bss_info += bytes_left;
 			bytes_left = 0;
-			return -1;
+			ret = -1;
+			goto check_next_scan;
 		}
 
 		/* Initialize the current working beacon pointer for this BSS
@@ -1684,7 +1685,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 				dev_err(priv->adapter->dev,
 					"%s: bytes left < IE length\n",
 					__func__);
-				goto done;
+				goto check_next_scan;
 			}
 			if (element_id == WLAN_EID_DS_PARAMS) {
 				channel = *(current_ptr + sizeof(struct ieee_types_header));
@@ -1747,6 +1748,7 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 		}
 	}
 
+check_next_scan:
 	spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
 	if (list_empty(&adapter->scan_pending_q)) {
 		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
@@ -1807,7 +1809,6 @@ int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
 		}
 	}
 
-done:
 	return ret;
 }
 
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index f2ecdeb3a90d..1535efda3d52 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -542,8 +542,8 @@ static void _rtl_rx_pre_process(struct ieee80211_hw *hw, struct sk_buff *skb)
 	WARN_ON(skb_queue_empty(&rx_queue));
 	while (!skb_queue_empty(&rx_queue)) {
 		_skb = skb_dequeue(&rx_queue);
-		_rtl_usb_rx_process_agg(hw, skb);
-		ieee80211_rx_irqsafe(hw, skb);
+		_rtl_usb_rx_process_agg(hw, _skb);
+		ieee80211_rx_irqsafe(hw, _skb);
 	}
 }
 
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index ebd08b21b234..959b1cd89e6a 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -165,12 +165,16 @@ static void tx_poll_stop(struct vhost_net *net)
 }
 
 /* Caller must have TX VQ lock */
-static void tx_poll_start(struct vhost_net *net, struct socket *sock)
+static int tx_poll_start(struct vhost_net *net, struct socket *sock)
 {
+	int ret;
+
 	if (unlikely(net->tx_poll_state != VHOST_NET_POLL_STOPPED))
-		return;
-	vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
-	net->tx_poll_state = VHOST_NET_POLL_STARTED;
+		return 0;
+	ret = vhost_poll_start(net->poll + VHOST_NET_VQ_TX, sock->file);
+	if (!ret)
+		net->tx_poll_state = VHOST_NET_POLL_STARTED;
+	return ret;
 }
 
 /* In case of DMA done not in order in lower device driver for some reason.
@@ -642,20 +646,23 @@ static void vhost_net_disable_vq(struct vhost_net *n,
 		vhost_poll_stop(n->poll + VHOST_NET_VQ_RX);
 }
 
-static void vhost_net_enable_vq(struct vhost_net *n,
+static int vhost_net_enable_vq(struct vhost_net *n,
 				struct vhost_virtqueue *vq)
 {
 	struct socket *sock;
+	int ret;
 
 	sock = rcu_dereference_protected(vq->private_data,
 					 lockdep_is_held(&vq->mutex));
 	if (!sock)
-		return;
+		return 0;
 	if (vq == n->vqs + VHOST_NET_VQ_TX) {
 		n->tx_poll_state = VHOST_NET_POLL_STOPPED;
-		tx_poll_start(n, sock);
+		ret = tx_poll_start(n, sock);
 	} else
-		vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+		ret = vhost_poll_start(n->poll + VHOST_NET_VQ_RX, sock->file);
+
+	return ret;
 }
 
 static struct socket *vhost_net_stop_vq(struct vhost_net *n,
@@ -827,15 +834,18 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 			r = PTR_ERR(ubufs);
 			goto err_ubufs;
 		}
-		oldubufs = vq->ubufs;
-		vq->ubufs = ubufs;
+
 		vhost_net_disable_vq(n, vq);
 		rcu_assign_pointer(vq->private_data, sock);
-		vhost_net_enable_vq(n, vq);
-
 		r = vhost_init_used(vq);
 		if (r)
-			goto err_vq;
+			goto err_used;
+		r = vhost_net_enable_vq(n, vq);
+		if (r)
+			goto err_used;
+
+		oldubufs = vq->ubufs;
+		vq->ubufs = ubufs;
 
 		n->tx_packets = 0;
 		n->tx_zcopy_err = 0;
@@ -859,6 +869,11 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
 	mutex_unlock(&n->dev.mutex);
 	return 0;
 
+err_used:
+	rcu_assign_pointer(vq->private_data, oldsock);
+	vhost_net_enable_vq(n, vq);
+	if (ubufs)
+		vhost_ubuf_put_and_wait(ubufs);
 err_ubufs:
 	fput(sock->file);
 err_vq:
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 34389f75fe65..9759249e6d90 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -77,26 +77,38 @@ void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 	init_poll_funcptr(&poll->table, vhost_poll_func);
 	poll->mask = mask;
 	poll->dev = dev;
+	poll->wqh = NULL;
 
 	vhost_work_init(&poll->work, fn);
 }
 
 /* Start polling a file. We add ourselves to file's wait queue. The caller must
  * keep a reference to a file until after vhost_poll_stop is called. */
-void vhost_poll_start(struct vhost_poll *poll, struct file *file)
+int vhost_poll_start(struct vhost_poll *poll, struct file *file)
 {
 	unsigned long mask;
+	int ret = 0;
 
 	mask = file->f_op->poll(file, &poll->table);
 	if (mask)
 		vhost_poll_wakeup(&poll->wait, 0, 0, (void *)mask);
+	if (mask & POLLERR) {
+		if (poll->wqh)
+			remove_wait_queue(poll->wqh, &poll->wait);
+		ret = -EINVAL;
+	}
+
+	return ret;
 }
 
 /* Stop polling a file. After this function returns, it becomes safe to drop the
 * file reference. You must also flush afterwards. */
 void vhost_poll_stop(struct vhost_poll *poll)
 {
-	remove_wait_queue(poll->wqh, &poll->wait);
+	if (poll->wqh) {
+		remove_wait_queue(poll->wqh, &poll->wait);
+		poll->wqh = NULL;
+	}
 }
 
 static bool vhost_work_seq_done(struct vhost_dev *dev, struct vhost_work *work,
@@ -792,7 +804,7 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
 		fput(filep);
 
 	if (pollstart && vq->handle_kick)
-		vhost_poll_start(&vq->poll, vq->kick);
+		r = vhost_poll_start(&vq->poll, vq->kick);
 
 	mutex_unlock(&vq->mutex);
 
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 2639c58b23ab..17261e277c02 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -42,7 +42,7 @@ void vhost_work_queue(struct vhost_dev *dev, struct vhost_work *work);
 
 void vhost_poll_init(struct vhost_poll *poll, vhost_work_fn_t fn,
 		     unsigned long mask, struct vhost_dev *dev);
-void vhost_poll_start(struct vhost_poll *poll, struct file *file);
+int vhost_poll_start(struct vhost_poll *poll, struct file *file);
 void vhost_poll_stop(struct vhost_poll *poll);
 void vhost_poll_flush(struct vhost_poll *poll);
 void vhost_poll_queue(struct vhost_poll *poll);
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 5de7a220e986..0de078d4cdb9 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -33,6 +33,7 @@ struct usbnet {
 	wait_queue_head_t *wait;
 	struct mutex phy_mutex;
 	unsigned char suspend_count;
+	unsigned char pkt_cnt, pkt_err;
 
 	/* i/o info: pipes etc */
 	unsigned in, out;
@@ -70,6 +71,7 @@ struct usbnet {
 #	define EVENT_DEV_OPEN	7
 #	define EVENT_DEVICE_REPORT_IDLE	8
 #	define EVENT_NO_RUNTIME_PM	9
+#	define EVENT_RX_KILL	10
 };
 
 static inline struct usb_driver *driver_of(struct usb_interface *intf)
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index 498433dd067d..938b7fd11204 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -34,17 +34,17 @@ extern int udpv6_connect(struct sock *sk,
 			 struct sockaddr *uaddr,
 			 int addr_len);
 
-extern int datagram_recv_ctl(struct sock *sk,
+extern int ip6_datagram_recv_ctl(struct sock *sk,
 			     struct msghdr *msg,
 			     struct sk_buff *skb);
 
-extern int datagram_send_ctl(struct net *net,
+extern int ip6_datagram_send_ctl(struct net *net,
 			     struct sock *sk,
 			     struct msghdr *msg,
 			     struct flowi6 *fl6,
 			     struct ipv6_txoptions *opt,
 			     int *hlimit, int *tclass,
 			     int *dontfrag);
 
 #define LOOPBACK4_IPV6 cpu_to_be32(0x7f000006)
 
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 797769551b91..2201e699ad67 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1790,10 +1790,13 @@ static ssize_t pktgen_thread_write(struct file *file,
 			return -EFAULT;
 		i += len;
 		mutex_lock(&pktgen_thread_lock);
-		pktgen_add_device(t, f);
+		ret = pktgen_add_device(t, f);
 		mutex_unlock(&pktgen_thread_lock);
-		ret = count;
-		sprintf(pg_result, "OK: add_device=%s", f);
+		if (!ret) {
+			ret = count;
+			sprintf(pg_result, "OK: add_device=%s", f);
+		} else
+			sprintf(pg_result, "ERROR: can not add device %s", f);
 		goto out;
 	}
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index bddc1dd2e7f2..55f7ef6ada6d 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -686,7 +686,7 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
 	new->network_header = old->network_header;
 	new->mac_header = old->mac_header;
 	new->inner_transport_header = old->inner_transport_header;
-	new->inner_network_header = old->inner_transport_header;
+	new->inner_network_header = old->inner_network_header;
 	skb_dst_copy(new, old);
 	new->rxhash = old->rxhash;
 	new->ooo_okay = old->ooo_okay;
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index 291f2ed7cc31..cdf2e707bb10 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -310,6 +310,12 @@ void tcp_slow_start(struct tcp_sock *tp)
 {
 	int cnt; /* increase in packets */
 	unsigned int delta = 0;
+	u32 snd_cwnd = tp->snd_cwnd;
+
+	if (unlikely(!snd_cwnd)) {
+		pr_err_once("snd_cwnd is nul, please report this bug.\n");
+		snd_cwnd = 1U;
+	}
 
 	/* RFC3465: ABC Slow start
 	 * Increase only after a full MSS of bytes is acked
@@ -324,7 +330,7 @@ void tcp_slow_start(struct tcp_sock *tp)
 	if (sysctl_tcp_max_ssthresh > 0 && tp->snd_cwnd > sysctl_tcp_max_ssthresh)
 		cnt = sysctl_tcp_max_ssthresh >> 1; /* limited slow start */
 	else
-		cnt = tp->snd_cwnd; /* exponential increase */
+		cnt = snd_cwnd; /* exponential increase */
 
 	/* RFC3465: ABC
 	 * We MAY increase by 2 if discovered delayed ack
@@ -334,11 +340,11 @@ void tcp_slow_start(struct tcp_sock *tp)
 	tp->bytes_acked = 0;
 
 	tp->snd_cwnd_cnt += cnt;
-	while (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-		tp->snd_cwnd_cnt -= tp->snd_cwnd;
+	while (tp->snd_cwnd_cnt >= snd_cwnd) {
+		tp->snd_cwnd_cnt -= snd_cwnd;
 		delta++;
 	}
-	tp->snd_cwnd = min(tp->snd_cwnd + delta, tp->snd_cwnd_clamp);
+	tp->snd_cwnd = min(snd_cwnd + delta, tp->snd_cwnd_clamp);
 }
 EXPORT_SYMBOL_GPL(tcp_slow_start);
 
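The tcp_slow_start() change snapshots snd_cwnd once and clamps a zero value to 1 before using it as the loop bound. A standalone arithmetic sketch of that loop (names here are local to the example, not the kernel's):

#include <stdio.h>

/* One call per ACK: credit a cwnd's worth of "cnt" and convert whole-cwnd
 * credits into +1 increments, using a local snapshot so a zero cwnd can
 * neither loop forever nor stall the increase. */
static void slow_start_ack(unsigned int *cwnd_p, unsigned int *cwnd_cnt_p,
			   unsigned int cwnd_clamp)
{
	unsigned int snd_cwnd = *cwnd_p;
	unsigned int delta = 0;
	unsigned int cnt;

	if (!snd_cwnd) /* defensive, as in the patch */
		snd_cwnd = 1U;

	cnt = snd_cwnd; /* exponential increase */
	*cwnd_cnt_p += cnt;
	while (*cwnd_cnt_p >= snd_cwnd) {
		*cwnd_cnt_p -= snd_cwnd;
		delta++;
	}
	snd_cwnd += delta;
	*cwnd_p = snd_cwnd < cwnd_clamp ? snd_cwnd : cwnd_clamp;
}

int main(void)
{
	unsigned int cwnd = 10, cnt = 0, i;

	for (i = 0; i < 10; i++) /* roughly one RTT worth of ACKs */
		slow_start_ack(&cwnd, &cnt, 0xFFFF);
	printf("cwnd after one RTT: %u\n", cwnd); /* roughly doubled: 20 */
	return 0;
}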
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 492c7cfe1453..e376aa9591bc 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -3482,7 +3482,8 @@ static bool tcp_process_frto(struct sock *sk, int flag)
 	    ((tp->frto_counter >= 2) && (flag & FLAG_RETRANS_DATA_ACKED)))
 		tp->undo_marker = 0;
 
-	if (!before(tp->snd_una, tp->frto_highmark)) {
+	if (!before(tp->snd_una, tp->frto_highmark) ||
+	    !tcp_packets_in_flight(tp)) {
 		tcp_enter_frto_loss(sk, (tp->frto_counter == 1 ? 2 : 3), flag);
 		return true;
 	}
@@ -5647,8 +5648,7 @@ static bool tcp_rcv_fastopen_synack(struct sock *sk, struct sk_buff *synack,
 	 * the remote receives only the retransmitted (regular) SYNs: either
 	 * the original SYN-data or the corresponding SYN-ACK is lost.
 	 */
-	syn_drop = (cookie->len <= 0 && data &&
-		    inet_csk(sk)->icsk_retransmits);
+	syn_drop = (cookie->len <= 0 && data && tp->total_retrans);
 
 	tcp_fastopen_cache_set(sk, mss, cookie, syn_drop);
 
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5a1cfc692df0..0eaf685bddc9 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -496,6 +496,7 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		 * errors returned from accept().
 		 */
 		inet_csk_reqsk_queue_drop(sk, req, prev);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 		goto out;
 
 	case TCP_SYN_SENT:
@@ -1501,8 +1502,10 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 	 * clogging syn queue with openreqs with exponentially increasing
 	 * timeout.
 	 */
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
+	}
 
 	req = inet_reqsk_alloc(&tcp_request_sock_ops);
 	if (!req)
@@ -1667,6 +1670,7 @@ drop_and_release:
 drop_and_free:
 	reqsk_free(req);
 drop:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return 0;
 }
 EXPORT_SYMBOL(tcp_v4_conn_request);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 7f7332b44699..bd9f9360f769 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1656,6 +1656,7 @@ static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
 		if (dev->addr_len != IEEE802154_ADDR_LEN)
 			return -1;
 		memcpy(eui, dev->dev_addr, 8);
+		eui[0] ^= 2;
 		return 0;
 	}
 
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 33be36398a78..f5a54782a340 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -380,7 +380,7 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len)
 		if (skb->protocol == htons(ETH_P_IPV6)) {
 			sin->sin6_addr = ipv6_hdr(skb)->saddr;
 			if (np->rxopt.all)
-				datagram_recv_ctl(sk, msg, skb);
+				ip6_datagram_recv_ctl(sk, msg, skb);
 			if (ipv6_addr_type(&sin->sin6_addr) & IPV6_ADDR_LINKLOCAL)
 				sin->sin6_scope_id = IP6CB(skb)->iif;
 		} else {
@@ -468,7 +468,8 @@ out:
 }
 
 
-int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
+int ip6_datagram_recv_ctl(struct sock *sk, struct msghdr *msg,
+			  struct sk_buff *skb)
 {
 	struct ipv6_pinfo *np = inet6_sk(sk);
 	struct inet6_skb_parm *opt = IP6CB(skb);
@@ -598,11 +599,12 @@ int datagram_recv_ctl(struct sock *sk, struct msghdr *msg, struct sk_buff *skb)
 	}
 	return 0;
 }
+EXPORT_SYMBOL_GPL(ip6_datagram_recv_ctl);
 
-int datagram_send_ctl(struct net *net, struct sock *sk,
+int ip6_datagram_send_ctl(struct net *net, struct sock *sk,
 			  struct msghdr *msg, struct flowi6 *fl6,
 			  struct ipv6_txoptions *opt,
 			  int *hlimit, int *tclass, int *dontfrag)
 {
 	struct in6_pktinfo *src_info;
 	struct cmsghdr *cmsg;
@@ -872,4 +874,4 @@ int datagram_send_ctl(struct net *net, struct sock *sk,
 exit_f:
 	return err;
 }
-EXPORT_SYMBOL_GPL(datagram_send_ctl);
+EXPORT_SYMBOL_GPL(ip6_datagram_send_ctl);
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 22494afd981c..ea42bf40a997 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -390,8 +390,8 @@ fl_create(struct net *net, struct sock *sk, struct in6_flowlabel_req *freq,
 		msg.msg_control = (void*)(fl->opt+1);
 		memset(&flowi6, 0, sizeof(flowi6));
 
-		err = datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt, &junk,
-					&junk, &junk);
+		err = ip6_datagram_send_ctl(net, sk, &msg, &flowi6, fl->opt,
+					    &junk, &junk, &junk);
 		if (err)
 			goto done;
 		err = -EINVAL;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index ee94d31c9d4d..d1e2e8ef29c5 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -476,8 +476,8 @@ sticky_done:
 		msg.msg_controllen = optlen;
 		msg.msg_control = (void*)(opt+1);
 
-		retv = datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk, &junk,
-					 &junk);
+		retv = ip6_datagram_send_ctl(net, sk, &msg, &fl6, opt, &junk,
+					     &junk, &junk);
 		if (retv)
 			goto done;
 update:
@@ -1002,7 +1002,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 		release_sock(sk);
 
 		if (skb) {
-			int err = datagram_recv_ctl(sk, &msg, skb);
+			int err = ip6_datagram_recv_ctl(sk, &msg, skb);
 			kfree_skb(skb);
 			if (err)
 				return err;
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6cd29b1e8b92..70fa81449997 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -507,7 +507,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
 	sock_recv_ts_and_drops(msg, sk, skb);
 
 	if (np->rxopt.all)
-		datagram_recv_ctl(sk, msg, skb);
+		ip6_datagram_recv_ctl(sk, msg, skb);
 
 	err = copied;
 	if (flags & MSG_TRUNC)
@@ -822,8 +822,8 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(struct ipv6_txoptions);
 
-		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
 					    &hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 06087e58738a..bbb28ae7e5f3 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -423,6 +423,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
 		}
 
 		inet_csk_reqsk_queue_drop(sk, req, prev);
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 		goto out;
 
 	case TCP_SYN_SENT:
@@ -959,8 +960,10 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 		goto drop;
 	}
 
-	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
+	if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
 		goto drop;
+	}
 
 	req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
 	if (req == NULL)
@@ -1109,6 +1112,7 @@ drop_and_release:
 drop_and_free:
 	reqsk_free(req);
 drop:
+	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
 	return 0; /* don't send reset */
 }
 
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index cb5bf497c09c..599e1ba6d1ce 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -467,7 +467,7 @@ try_again:
 			ip_cmsg_recv(msg, skb);
 	} else {
 		if (np->rxopt.all)
-			datagram_recv_ctl(sk, msg, skb);
+			ip6_datagram_recv_ctl(sk, msg, skb);
 	}
 
 	err = copied;
@@ -1143,8 +1143,8 @@ do_udp_sendmsg:
 		memset(opt, 0, sizeof(struct ipv6_txoptions));
 		opt->tot_len = sizeof(*opt);
 
-		err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
+		err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
 					    &hlimit, &tclass, &dontfrag);
 		if (err < 0) {
 			fl6_sock_release(flowlabel);
 			return err;
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index 1a9f3723c13c..06389d5ff120 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c | |||
@@ -168,6 +168,51 @@ l2tp_session_id_hash_2(struct l2tp_net *pn, u32 session_id) | |||
168 | 168 | ||
169 | } | 169 | } |
170 | 170 | ||
171 | /* Lookup the tunnel socket, possibly involving the fs code if the socket is | ||
172 | * owned by userspace. A struct sock returned from this function must be | ||
173 | * released using l2tp_tunnel_sock_put once you're done with it. | ||
174 | */ | ||
175 | struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel) | ||
176 | { | ||
177 | int err = 0; | ||
178 | struct socket *sock = NULL; | ||
179 | struct sock *sk = NULL; | ||
180 | |||
181 | if (!tunnel) | ||
182 | goto out; | ||
183 | |||
184 | if (tunnel->fd >= 0) { | ||
185 | /* Socket is owned by userspace, who might be in the process | ||
186 | * of closing it. Look the socket up using the fd to ensure | ||
187 | * consistency. | ||
188 | */ | ||
189 | sock = sockfd_lookup(tunnel->fd, &err); | ||
190 | if (sock) | ||
191 | sk = sock->sk; | ||
192 | } else { | ||
193 | /* Socket is owned by kernelspace */ | ||
194 | sk = tunnel->sock; | ||
195 | } | ||
196 | |||
197 | out: | ||
198 | return sk; | ||
199 | } | ||
200 | EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_lookup); | ||
201 | |||
202 | /* Drop a reference to a tunnel socket obtained via l2tp_tunnel_sock_lookup */ | ||
203 | void l2tp_tunnel_sock_put(struct sock *sk) | ||
204 | { | ||
205 | struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); | ||
206 | if (tunnel) { | ||
207 | if (tunnel->fd >= 0) { | ||
208 | /* Socket is owned by userspace */ | ||
209 | sockfd_put(sk->sk_socket); | ||
210 | } | ||
211 | sock_put(sk); | ||
212 | } | ||
213 | } | ||
214 | EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); | ||
215 | |||
171 | /* Lookup a session by id in the global session list | 216 | /* Lookup a session by id in the global session list |
172 | */ | 217 | */ |
173 | static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) | 218 | static struct l2tp_session *l2tp_session_find_2(struct net *net, u32 session_id) |
@@ -1607,6 +1652,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 | |||
1607 | tunnel->old_sk_destruct = sk->sk_destruct; | 1652 | tunnel->old_sk_destruct = sk->sk_destruct; |
1608 | sk->sk_destruct = &l2tp_tunnel_destruct; | 1653 | sk->sk_destruct = &l2tp_tunnel_destruct; |
1609 | tunnel->sock = sk; | 1654 | tunnel->sock = sk; |
1655 | tunnel->fd = fd; | ||
1610 | lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); | 1656 | lockdep_set_class_and_name(&sk->sk_lock.slock, &l2tp_socket_class, "l2tp_sock"); |
1611 | 1657 | ||
1612 | sk->sk_allocation = GFP_ATOMIC; | 1658 | sk->sk_allocation = GFP_ATOMIC; |
@@ -1642,24 +1688,32 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create); | |||
1642 | */ | 1688 | */ |
1643 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) | 1689 | int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel) |
1644 | { | 1690 | { |
1645 | int err = 0; | 1691 | int err = -EBADF; |
1646 | struct socket *sock = tunnel->sock ? tunnel->sock->sk_socket : NULL; | 1692 | struct socket *sock = NULL; |
1693 | struct sock *sk = NULL; | ||
1694 | |||
1695 | sk = l2tp_tunnel_sock_lookup(tunnel); | ||
1696 | if (!sk) | ||
1697 | goto out; | ||
1698 | |||
1699 | sock = sk->sk_socket; | ||
1700 | BUG_ON(!sock); | ||
1647 | 1701 | ||
1648 | /* Force the tunnel socket to close. This will eventually | 1702 | /* Force the tunnel socket to close. This will eventually |
1649 | * cause the tunnel to be deleted via the normal socket close | 1703 | * cause the tunnel to be deleted via the normal socket close |
1650 | * mechanisms when userspace closes the tunnel socket. | 1704 | * mechanisms when userspace closes the tunnel socket. |
1651 | */ | 1705 | */ |
1652 | if (sock != NULL) { | 1706 | err = inet_shutdown(sock, 2); |
1653 | err = inet_shutdown(sock, 2); | ||
1654 | 1707 | ||
1655 | /* If the tunnel's socket was created by the kernel, | 1708 | /* If the tunnel's socket was created by the kernel, |
1656 | * close the socket here since the socket was not | 1709 | * close the socket here since the socket was not |
1657 | * created by userspace. | 1710 | * created by userspace. |
1658 | */ | 1711 | */ |
1659 | if (sock->file == NULL) | 1712 | if (sock->file == NULL) |
1660 | err = inet_release(sock); | 1713 | err = inet_release(sock); |
1661 | } | ||
1662 | 1714 | ||
1715 | l2tp_tunnel_sock_put(sk); | ||
1716 | out: | ||
1663 | return err; | 1717 | return err; |
1664 | } | 1718 | } |
1665 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); | 1719 | EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); |
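The l2tp_core.c changes stop l2tp_tunnel_delete() from trusting a cached tunnel->sock pointer when the tunnel socket is owned by userspace: the socket is re-looked-up through the stored fd via sockfd_lookup(), which takes its own reference, and that reference is dropped again with l2tp_tunnel_sock_put() once the shutdown is done. The userspace program below is only an analogue of the underlying rule, namely that an object reached through a handle stays valid only while you hold your own reference to it; the dup()/close() pair stands in for sockfd_lookup()/sockfd_put() and is purely illustrative.

/* Userspace analogue of the reference-holding pattern used above. */
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int type;
	socklen_t len = sizeof(type);
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int ref = dup(fd);	/* take our own reference, like sockfd_lookup() */

	close(fd);		/* the original owner closes its fd */

	/* The socket object is still alive through the held reference. */
	if (getsockopt(ref, SOL_SOCKET, SO_TYPE, &type, &len) == 0)
		printf("still reachable: SO_TYPE=%d\n", type);

	close(ref);		/* drop our reference, like sockfd_put() */
	return 0;
}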
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 56d583e083a7..e62204cad4fe 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h | |||
@@ -188,7 +188,8 @@ struct l2tp_tunnel { | |||
188 | int (*recv_payload_hook)(struct sk_buff *skb); | 188 | int (*recv_payload_hook)(struct sk_buff *skb); |
189 | void (*old_sk_destruct)(struct sock *); | 189 | void (*old_sk_destruct)(struct sock *); |
190 | struct sock *sock; /* Parent socket */ | 190 | struct sock *sock; /* Parent socket */ |
191 | int fd; | 191 | int fd; /* Parent fd, if tunnel socket |
192 | * was created by userspace */ | ||
192 | 193 | ||
193 | uint8_t priv[0]; /* private data */ | 194 | uint8_t priv[0]; /* private data */ |
194 | }; | 195 | }; |
@@ -228,6 +229,8 @@ out: | |||
228 | return tunnel; | 229 | return tunnel; |
229 | } | 230 | } |
230 | 231 | ||
232 | extern struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel); | ||
233 | extern void l2tp_tunnel_sock_put(struct sock *sk); | ||
231 | extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); | 234 | extern struct l2tp_session *l2tp_session_find(struct net *net, struct l2tp_tunnel *tunnel, u32 session_id); |
232 | extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); | 235 | extern struct l2tp_session *l2tp_session_find_nth(struct l2tp_tunnel *tunnel, int nth); |
233 | extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); | 236 | extern struct l2tp_session *l2tp_session_find_by_ifname(struct net *net, char *ifname); |
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 927547171bc7..8ee4a86ae996 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c | |||
@@ -554,8 +554,8 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk, | |||
554 | memset(opt, 0, sizeof(struct ipv6_txoptions)); | 554 | memset(opt, 0, sizeof(struct ipv6_txoptions)); |
555 | opt->tot_len = sizeof(struct ipv6_txoptions); | 555 | opt->tot_len = sizeof(struct ipv6_txoptions); |
556 | 556 | ||
557 | err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, | 557 | err = ip6_datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt, |
558 | &hlimit, &tclass, &dontfrag); | 558 | &hlimit, &tclass, &dontfrag); |
559 | if (err < 0) { | 559 | if (err < 0) { |
560 | fl6_sock_release(flowlabel); | 560 | fl6_sock_release(flowlabel); |
561 | return err; | 561 | return err; |
@@ -646,7 +646,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
646 | struct msghdr *msg, size_t len, int noblock, | 646 | struct msghdr *msg, size_t len, int noblock, |
647 | int flags, int *addr_len) | 647 | int flags, int *addr_len) |
648 | { | 648 | { |
649 | struct inet_sock *inet = inet_sk(sk); | 649 | struct ipv6_pinfo *np = inet6_sk(sk); |
650 | struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; | 650 | struct sockaddr_l2tpip6 *lsa = (struct sockaddr_l2tpip6 *)msg->msg_name; |
651 | size_t copied = 0; | 651 | size_t copied = 0; |
652 | int err = -EOPNOTSUPP; | 652 | int err = -EOPNOTSUPP; |
@@ -688,8 +688,8 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk, | |||
688 | lsa->l2tp_scope_id = IP6CB(skb)->iif; | 688 | lsa->l2tp_scope_id = IP6CB(skb)->iif; |
689 | } | 689 | } |
690 | 690 | ||
691 | if (inet->cmsg_flags) | 691 | if (np->rxopt.all) |
692 | ip_cmsg_recv(msg, skb); | 692 | ip6_datagram_recv_ctl(sk, msg, skb); |
693 | 693 | ||
694 | if (flags & MSG_TRUNC) | 694 | if (flags & MSG_TRUNC) |
695 | copied = skb->len; | 695 | copied = skb->len; |
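The l2tp_ip6_recvmsg() hunk also fixes what looks like a copy-and-paste from the IPv4 code: an IPv6 L2TP socket was checking inet->cmsg_flags and calling ip_cmsg_recv(), so the IPv6 receive options never produced control messages. With np->rxopt.all and ip6_datagram_recv_ctl() the usual IPv6 ancillary data is delivered. For instance, IPV6_RECVHOPLIMIT is one of the rxopt bits that path honours; the standalone sketch below demonstrates it on a plain UDP socket (made-up port) rather than an L2TP/IPv6 socket, purely for simplicity.

/* Receive the per-packet hop limit as an IPV6_HOPLIMIT cmsg. */
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int on = 1, fd = socket(AF_INET6, SOCK_DGRAM, 0);
	struct sockaddr_in6 a = { .sin6_family = AF_INET6,
				  .sin6_port = htons(5556) };
	char data[1500], cbuf[64];
	struct iovec iov = { data, sizeof(data) };
	struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
			      .msg_control = cbuf,
			      .msg_controllen = sizeof(cbuf) };
	struct cmsghdr *c;

	setsockopt(fd, IPPROTO_IPV6, IPV6_RECVHOPLIMIT, &on, sizeof(on));
	bind(fd, (struct sockaddr *)&a, sizeof(a));
	if (recvmsg(fd, &msg, 0) < 0)
		return 1;
	for (c = CMSG_FIRSTHDR(&msg); c; c = CMSG_NXTHDR(&msg, c))
		if (c->cmsg_level == IPPROTO_IPV6 &&
		    c->cmsg_type == IPV6_HOPLIMIT) {
			int hl;

			memcpy(&hl, CMSG_DATA(c), sizeof(hl));
			printf("hop limit %d\n", hl);
		}
	close(fd);
	return 0;
}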
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index e639645e8fec..c111bd0e083a 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c | |||
@@ -2361,13 +2361,15 @@ static int packet_release(struct socket *sock) | |||
2361 | 2361 | ||
2362 | packet_flush_mclist(sk); | 2362 | packet_flush_mclist(sk); |
2363 | 2363 | ||
2364 | memset(&req_u, 0, sizeof(req_u)); | 2364 | if (po->rx_ring.pg_vec) { |
2365 | 2365 | memset(&req_u, 0, sizeof(req_u)); | |
2366 | if (po->rx_ring.pg_vec) | ||
2367 | packet_set_ring(sk, &req_u, 1, 0); | 2366 | packet_set_ring(sk, &req_u, 1, 0); |
2367 | } | ||
2368 | 2368 | ||
2369 | if (po->tx_ring.pg_vec) | 2369 | if (po->tx_ring.pg_vec) { |
2370 | memset(&req_u, 0, sizeof(req_u)); | ||
2370 | packet_set_ring(sk, &req_u, 1, 1); | 2371 | packet_set_ring(sk, &req_u, 1, 1); |
2372 | } | ||
2371 | 2373 | ||
2372 | fanout_release(sk); | 2374 | fanout_release(sk); |
2373 | 2375 | ||
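In packet_release() the zeroing of req_u now happens inside each branch, so packet_set_ring() always sees a freshly cleared request when tearing down the rx and tx rings, and the memset is skipped entirely when no ring was set up. The toy program below only illustrates why re-zeroing a reused union before each consumer can matter; the union and helper are hypothetical stand-ins, not the real tpacket structures, and the motivation stated here is my reading of the hunk rather than something the diff spells out.

/* Hypothetical stand-in showing stale state in a reused union. */
#include <stdio.h>
#include <string.h>

union ring_req {			/* stand-in for the request union */
	struct { unsigned int block_nr; } v1;
	struct { unsigned int block_nr, timeout; } v3;
};

static void set_ring(union ring_req *req, int closing)
{
	if (!closing)
		req->v3.timeout = 1000;	/* a consumer may scribble here */
	printf("block_nr=%u timeout=%u\n",
	       req->v3.block_nr, req->v3.timeout);
}

int main(void)
{
	union ring_req req;

	memset(&req, 0, sizeof(req));
	set_ring(&req, 0);		/* first user dirties the union    */
	set_ring(&req, 1);		/* no re-zeroing: stale timeout    */

	memset(&req, 0, sizeof(req));	/* what the patch now does         */
	set_ring(&req, 1);		/* second user sees a clean state  */
	return 0;
}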
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index 298c0ddfb57e..3d2acc7a9c80 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c | |||
@@ -438,18 +438,18 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) | |||
438 | if (q->rate) { | 438 | if (q->rate) { |
439 | struct sk_buff_head *list = &sch->q; | 439 | struct sk_buff_head *list = &sch->q; |
440 | 440 | ||
441 | delay += packet_len_2_sched_time(skb->len, q); | ||
442 | |||
443 | if (!skb_queue_empty(list)) { | 441 | if (!skb_queue_empty(list)) { |
444 | /* | 442 | /* |
445 | * Last packet in queue is reference point (now). | 443 | * Last packet in queue is reference point (now), |
446 | * First packet in queue is already in flight, | 444 | * calculate this time bonus and subtract |
447 | * calculate this time bonus and substract | ||
448 | * from delay. | 445 | * from delay. |
449 | */ | 446 | */ |
450 | delay -= now - netem_skb_cb(skb_peek(list))->time_to_send; | 447 | delay -= netem_skb_cb(skb_peek_tail(list))->time_to_send - now; |
448 | delay = max_t(psched_tdiff_t, 0, delay); | ||
451 | now = netem_skb_cb(skb_peek_tail(list))->time_to_send; | 449 | now = netem_skb_cb(skb_peek_tail(list))->time_to_send; |
452 | } | 450 | } |
451 | |||
452 | delay += packet_len_2_sched_time(skb->len, q); | ||
453 | } | 453 | } |
454 | 454 | ||
455 | cb->time_to_send = now + delay; | 455 | cb->time_to_send = now + delay; |
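The sch_netem.c change reorders the rate-shaping arithmetic: the "time bonus" contributed by the packet at the tail of the queue is subtracted first and clamped at zero, and only then is the serialization time of the new packet added. The worked example below mirrors that corrected sequence with arbitrary tick values; the names are stand-ins for the qdisc's psched quantities, not code from this tree.

/* Corrected netem ordering, in plain arithmetic. */
#include <stdio.h>

typedef long long tdiff_t;		/* stands in for psched_tdiff_t */

static tdiff_t max0(tdiff_t v) { return v > 0 ? v : 0; }

int main(void)
{
	tdiff_t now = 1000;		/* current time, in ticks          */
	tdiff_t last_tts = 1300;	/* time_to_send of the queue tail  */
	tdiff_t delay = 200;		/* configured latency for this skb */
	tdiff_t txtime = 50;		/* packet_len_2_sched_time(len, q) */

	delay -= last_tts - now;	/* bonus already "paid" by the tail */
	delay = max0(delay);		/* never let the delay go negative  */
	now = last_tts;			/* tail becomes the reference point */
	delay += txtime;		/* serialization time is added last */

	printf("time_to_send = %lld\n", now + delay);	/* prints 1350 */
	return 0;
}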
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 0a148c9d2a5c..0f679df7d072 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -465,7 +465,7 @@ static int svc_udp_get_dest_address4(struct svc_rqst *rqstp, | |||
465 | } | 465 | } |
466 | 466 | ||
467 | /* | 467 | /* |
468 | * See net/ipv6/datagram.c : datagram_recv_ctl | 468 | * See net/ipv6/datagram.c : ip6_datagram_recv_ctl |
469 | */ | 469 | */ |
470 | static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, | 470 | static int svc_udp_get_dest_address6(struct svc_rqst *rqstp, |
471 | struct cmsghdr *cmh) | 471 | struct cmsghdr *cmh) |
diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 01592d7d4789..45f1618c8e23 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c | |||
@@ -1358,7 +1358,7 @@ ieee80211_bss(struct wiphy *wiphy, struct iw_request_info *info, | |||
1358 | &iwe, IW_EV_UINT_LEN); | 1358 | &iwe, IW_EV_UINT_LEN); |
1359 | } | 1359 | } |
1360 | 1360 | ||
1361 | buf = kmalloc(30, GFP_ATOMIC); | 1361 | buf = kmalloc(31, GFP_ATOMIC); |
1362 | if (buf) { | 1362 | if (buf) { |
1363 | memset(&iwe, 0, sizeof(iwe)); | 1363 | memset(&iwe, 0, sizeof(iwe)); |
1364 | iwe.cmd = IWEVCUSTOM; | 1364 | iwe.cmd = IWEVCUSTOM; |
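The scan.c hunk grows an IWEVCUSTOM scratch buffer from 30 to 31 bytes. Assuming the buffer is the one that carries the wext " Last beacon: %ums ago" string (the format string is my assumption, not something shown in this hunk), the worst case with a ten-digit %u is exactly 31 bytes once the terminating NUL is counted, which the standalone snippet below verifies.

/* Worst-case length of the assumed " Last beacon: %ums ago" string. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	char buf[64];
	int n = snprintf(buf, sizeof(buf), " Last beacon: %ums ago", UINT_MAX);

	printf("\"%s\" -> %d chars + NUL = %d bytes\n", buf, n, n + 1);
	return 0;
}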