40 files changed, 130 insertions, 228 deletions
diff --git a/MAINTAINERS b/MAINTAINERS
index 88aac5f81e23..a536396a4af1 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -995,8 +995,8 @@ L:	netdev@vger.kernel.org
 S:	Supported
 
 BROADCOM BNX2X 10 GIGABIT ETHERNET DRIVER
-P:	Eliezer Tamir
-M:	eliezert@broadcom.com
+P:	Eilon Greenstein
+M:	eilong@broadcom.com
 L:	netdev@vger.kernel.org
 S:	Supported
 
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 4b46e68183e0..367b6d462708 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -5724,14 +5724,12 @@ bnx2_reset_task(struct work_struct *work)
 	if (!netif_running(bp->dev))
 		return;
 
-	bp->in_reset_task = 1;
 	bnx2_netif_stop(bp);
 
 	bnx2_init_nic(bp);
 
 	atomic_set(&bp->intr_sem, 1);
 	bnx2_netif_start(bp);
-	bp->in_reset_task = 0;
 }
 
 static void
@@ -5907,12 +5905,7 @@ bnx2_close(struct net_device *dev)
 	struct bnx2 *bp = netdev_priv(dev);
 	u32 reset_code;
 
-	/* Calling flush_scheduled_work() may deadlock because
-	 * linkwatch_event() may be on the workqueue and it will try to get
-	 * the rtnl_lock which we are holding.
-	 */
-	while (bp->in_reset_task)
-		msleep(1);
+	cancel_work_sync(&bp->reset_task);
 
 	bnx2_disable_int_sync(bp);
 	bnx2_napi_disable(bp);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index 1eaf5bb3d9c2..2377cc13bf61 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -6656,7 +6656,6 @@ struct bnx2 {
 	int			current_interval;
 	struct timer_list	timer;
 	struct work_struct	reset_task;
-	int			in_reset_task;
 
 	/* Used to synchronize phy accesses. */
 	spinlock_t		phy_lock;
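The following is an illustrative sketch, not part of the patch above: it shows the conversion pattern the bnx2 change applies and which most driver changes in this series repeat. Instead of an open-coded "work in progress" flag polled on close (or a flush_scheduled_work() call that can deadlock on rtnl_lock via linkwatch_event()), the driver cancels its own work item with cancel_work_sync(), which waits for a running handler to finish. The structure and function names below are made up for the example.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
	struct work_struct reset_task;
};

static void example_reset_task(struct work_struct *work)
{
	struct example_dev *dev =
		container_of(work, struct example_dev, reset_task);

	/* ... stop the interface, re-initialise the NIC, restart it ... */
	(void)dev;
}

static void example_close(struct example_dev *dev)
{
	/* Waits for a running example_reset_task() and removes a pending one,
	 * without flushing unrelated work such as linkwatch_event(). */
	cancel_work_sync(&dev->reset_task);
}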
diff --git a/drivers/net/bnx2x.c b/drivers/net/bnx2x.c
index 7bdb5af35951..70cba64732ca 100644
--- a/drivers/net/bnx2x.c
+++ b/drivers/net/bnx2x.c
@@ -6,7 +6,8 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Written by: Eliezer Tamir <eliezert@broadcom.com>
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  * UDP CSUM errata workaround by Arik Gendelman
  * Slowpath rework by Vladislav Zolotarov
@@ -74,7 +75,7 @@ static char version[] __devinitdata =
 	"Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
 	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
-MODULE_AUTHOR("Eliezer Tamir <eliezert@broadcom.com>");
+MODULE_AUTHOR("Eliezer Tamir");
 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index 4f0c0d31e7c1..8e68d06510a6 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -6,7 +6,8 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Written by: Eliezer Tamir <eliezert@broadcom.com>
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
  * Based on code from Michael Chan's bnx2 driver
  */
 
diff --git a/drivers/net/bnx2x_init.h b/drivers/net/bnx2x_init.h
index dcaecc53bdb1..370686eef97c 100644
--- a/drivers/net/bnx2x_init.h
+++ b/drivers/net/bnx2x_init.h
@@ -6,7 +6,8 @@
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
- * Written by: Eliezer Tamir <eliezert@broadcom.com>
+ * Maintained by: Eilon Greenstein <eilong@broadcom.com>
+ * Written by: Eliezer Tamir
  */
 
 #ifndef BNX2X_INIT_H
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index faae01dc1c4b..075fd547421e 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -2605,7 +2605,8 @@ static int ehea_stop(struct net_device *dev)
 	if (netif_msg_ifdown(port))
 		ehea_info("disabling port %s", dev->name);
 
-	flush_scheduled_work();
+	cancel_work_sync(&port->reset_task);
+
 	mutex_lock(&port->port_lock);
 	netif_stop_queue(dev);
 	port_napi_disable(port);
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index 5f9c42e7a7f1..329edd9c08fc 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -78,7 +78,7 @@ module_param_array_named(mac, mpc52xx_fec_mac_addr, byte, NULL, 0);
 MODULE_PARM_DESC(mac, "six hex digits, ie. 0x1,0x2,0xc0,0x01,0xba,0xbe");
 
 #define MPC52xx_MESSAGES_DEFAULT ( NETIF_MSG_DRV | NETIF_MSG_PROBE | \
-		NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFDOWN )
+		NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
 static int debug = -1;	/* the above default */
 module_param(debug, int, 0);
 MODULE_PARM_DESC(debug, "debugging messages level");
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index dde9c7e6408a..00bc7fbb6b37 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -959,7 +959,7 @@ static int epp_close(struct net_device *dev)
 	unsigned char tmp[1];
 
 	bc->work_running = 0;
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&bc->run_work);
 	bc->stat = EPP_DCDBIT;
 	tmp[0] = 0;
 	pp->ops->epp_write_addr(pp, tmp, 1, 0);
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 9b358f61ed7f..679a0826780e 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -577,12 +577,12 @@ static void ipg_nic_set_multicast_list(struct net_device *dev)
 		/* NIC to be configured in promiscuous mode. */
 		receivemode = IPG_RM_RECEIVEALLFRAMES;
 	} else if ((dev->flags & IFF_ALLMULTI) ||
-		   (dev->flags & IFF_MULTICAST &
+		   ((dev->flags & IFF_MULTICAST) &&
 		   (dev->mc_count > IPG_MULTICAST_HASHTABLE_SIZE))) {
 		/* NIC to be configured to receive all multicast
 		 * frames. */
 		receivemode |= IPG_RM_RECEIVEMULTICAST;
-	} else if (dev->flags & IFF_MULTICAST & (dev->mc_count > 0)) {
+	} else if ((dev->flags & IFF_MULTICAST) && (dev->mc_count > 0)) {
 		/* NIC to be configured to receive selected
 		 * multicast addresses. */
 		receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 169edc154928..858b191517b3 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -733,7 +733,7 @@ static void r6040_timer(unsigned long data)
 	}
 
 	/* Timer active again */
-	mod_timer(&lp->timer, jiffies + round_jiffies(HZ));
+	mod_timer(&lp->timer, round_jiffies(jiffies + HZ));
 }
 
 /* Read/set MAC address routines */
diff --git a/drivers/net/smc911x.c b/drivers/net/smc911x.c
index 4e2800205189..e2ee91a6ae7e 100644
--- a/drivers/net/smc911x.c
+++ b/drivers/net/smc911x.c
@@ -136,7 +136,6 @@ struct smc911x_local {
 
 	/* work queue */
 	struct work_struct phy_configure;
-	int work_pending;
 
 	int tx_throttle;
 	spinlock_t lock;
@@ -960,11 +959,11 @@ static void smc911x_phy_configure(struct work_struct *work)
 	 * We should not be called if phy_type is zero.
 	 */
 	if (lp->phy_type == 0)
-		goto smc911x_phy_configure_exit_nolock;
+		return;
 
 	if (smc911x_phy_reset(dev, phyaddr)) {
 		printk("%s: PHY reset timed out\n", dev->name);
-		goto smc911x_phy_configure_exit_nolock;
+		return;
 	}
 	spin_lock_irqsave(&lp->lock, flags);
 
@@ -1033,8 +1032,6 @@ static void smc911x_phy_configure(struct work_struct *work)
 
 smc911x_phy_configure_exit:
 	spin_unlock_irqrestore(&lp->lock, flags);
-smc911x_phy_configure_exit_nolock:
-	lp->work_pending = 0;
 }
 
 /*
@@ -1356,11 +1353,8 @@ static void smc911x_timeout(struct net_device *dev)
 	 * smc911x_phy_configure() calls msleep() which calls schedule_timeout()
 	 * which calls schedule(). Hence we use a work queue.
 	 */
-	if (lp->phy_type != 0) {
-		if (schedule_work(&lp->phy_configure)) {
-			lp->work_pending = 1;
-		}
-	}
+	if (lp->phy_type != 0)
+		schedule_work(&lp->phy_configure);
 
 	/* We can accept TX packets again */
 	dev->trans_start = jiffies;
@@ -1531,16 +1525,8 @@ static int smc911x_close(struct net_device *dev)
 	if (lp->phy_type != 0) {
 		/* We need to ensure that no calls to
 		 * smc911x_phy_configure are pending.
-
-		 * flush_scheduled_work() cannot be called because we
-		 * are running with the netlink semaphore held (from
-		 * devinet_ioctl()) and the pending work queue
-		 * contains linkwatch_event() (scheduled by
-		 * netif_carrier_off() above). linkwatch_event() also
-		 * wants the netlink semaphore.
 		 */
-		while (lp->work_pending)
-			schedule();
+		cancel_work_sync(&lp->phy_configure);
 		smc911x_phy_powerdown(dev, lp->mii.phy_id);
 	}
 
diff --git a/drivers/net/smc91x.c b/drivers/net/smc91x.c
index a188e33484e6..f2051b209da2 100644
--- a/drivers/net/smc91x.c
+++ b/drivers/net/smc91x.c
@@ -1016,15 +1016,8 @@ static void smc_phy_powerdown(struct net_device *dev)
 
 	/* We need to ensure that no calls to smc_phy_configure are
 	   pending.
-
-	   flush_scheduled_work() cannot be called because we are
-	   running with the netlink semaphore held (from
-	   devinet_ioctl()) and the pending work queue contains
-	   linkwatch_event() (scheduled by netif_carrier_off()
-	   above). linkwatch_event() also wants the netlink semaphore.
 	*/
-	while(lp->work_pending)
-		yield();
+	cancel_work_sync(&lp->phy_configure);
 
 	bmcr = smc_phy_read(dev, phy, MII_BMCR);
 	smc_phy_write(dev, phy, MII_BMCR, bmcr | BMCR_PDOWN);
@@ -1161,7 +1154,6 @@ static void smc_phy_configure(struct work_struct *work)
 smc_phy_configure_exit:
 	SMC_SELECT_BANK(lp, 2);
 	spin_unlock_irq(&lp->lock);
-	lp->work_pending = 0;
 }
 
 /*
@@ -1389,11 +1381,8 @@ static void smc_timeout(struct net_device *dev)
 	 * smc_phy_configure() calls msleep() which calls schedule_timeout()
 	 * which calls schedule(). Hence we use a work queue.
 	 */
-	if (lp->phy_type != 0) {
-		if (schedule_work(&lp->phy_configure)) {
-			lp->work_pending = 1;
-		}
-	}
+	if (lp->phy_type != 0)
+		schedule_work(&lp->phy_configure);
 
 	/* We can accept TX packets again */
 	dev->trans_start = jiffies;
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 55670b5eb611..af8d2c436efd 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -731,7 +731,7 @@ static void tulip_down (struct net_device *dev)
 	void __iomem *ioaddr = tp->base_addr;
 	unsigned long flags;
 
-	flush_scheduled_work();
+	cancel_work_sync(&tp->media_work);
 
 #ifdef CONFIG_TULIP_NAPI
 	napi_disable(&tp->napi);
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index 0dcfc0310264..7c66b052f55a 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -706,7 +706,7 @@ static void kaweth_kill_urbs(struct kaweth_device *kaweth)
 	usb_kill_urb(kaweth->rx_urb);
 	usb_kill_urb(kaweth->tx_urb);
 
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&kaweth->lowmem_work);
 
 	/* a scheduled work may have resubmitted,
 	   we hit them again */
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 20d387f6658c..f7aec9309d04 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -682,7 +682,13 @@ static int prism2_close(struct net_device *dev)
 		netif_device_detach(dev);
 	}
 
-	flush_scheduled_work();
+	cancel_work_sync(&local->reset_queue);
+	cancel_work_sync(&local->set_multicast_list_queue);
+	cancel_work_sync(&local->set_tim_queue);
+#ifndef PRISM2_NO_STATION_MODES
+	cancel_work_sync(&local->info_queue);
+#endif
+	cancel_work_sync(&local->comms_qual_update);
 
 	module_put(local->hw_module);
 
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 18e62e3d406f..b31b6b74aa28 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -239,11 +239,6 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
 	return (struct tcp_request_sock *)req;
 }
 
-struct tcp_deferred_accept_info {
-	struct sock *listen_sk;
-	struct request_sock *request;
-};
-
 struct tcp_sock {
 	/* inet_connection_sock has to be the first member of tcp_sock */
 	struct inet_connection_sock	inet_conn;
@@ -379,8 +374,6 @@ struct tcp_sock {
 	unsigned int		keepalive_intvl;  /* time interval between keep alive probes */
 	int			linger2;
 
-	struct tcp_deferred_accept_info defer_tcp_accept;
-
 	unsigned long last_synq_overflow;
 
 	u32	tso_deferred;
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index b220b5f624de..0c96e7bed5db 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -115,8 +115,8 @@ struct request_sock_queue {
 	struct request_sock	*rskq_accept_head;
 	struct request_sock	*rskq_accept_tail;
 	rwlock_t		syn_wait_lock;
-	u16			rskq_defer_accept;
-	/* 2 bytes hole, try to pack */
+	u8			rskq_defer_accept;
+	/* 3 bytes hole, try to pack */
 	struct listen_sock	*listen_opt;
 };
 
diff --git a/include/net/tcp.h b/include/net/tcp.h
index d448310c82c1..cf54034019d9 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -139,7 +139,6 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 #define MAX_TCP_KEEPINTVL	32767
 #define MAX_TCP_KEEPCNT		127
 #define MAX_TCP_SYNCNT		127
-#define MAX_TCP_ACCEPT_DEFERRED 65535
 
 #define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
 
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index 6de4bd195d28..1e8be246ad15 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -290,12 +290,12 @@ int dccp_ackvec_add(struct dccp_ackvec *av, const struct sock *sk,
 
 	while (1) {
 		const u8 len = dccp_ackvec_len(av, index);
-		const u8 state = dccp_ackvec_state(av, index);
+		const u8 av_state = dccp_ackvec_state(av, index);
 		/*
 		 * valid packets not yet in av_buf have a reserved
 		 * entry, with a len equal to 0.
 		 */
-		if (state == DCCP_ACKVEC_STATE_NOT_RECEIVED &&
+		if (av_state == DCCP_ACKVEC_STATE_NOT_RECEIVED &&
 		    len == 0 && delta == 0) { /* Found our
 						 reserved seat! */
 			dccp_pr_debug("Found %llu reserved seat!\n",
@@ -325,31 +325,6 @@ out_duplicate:
 	return -EILSEQ;
 }
 
-#ifdef CONFIG_IP_DCCP_DEBUG
-void dccp_ackvector_print(const u64 ackno, const unsigned char *vector, int len)
-{
-	dccp_pr_debug_cat("ACK vector len=%d, ackno=%llu |", len,
-			  (unsigned long long)ackno);
-
-	while (len--) {
-		const u8 state = (*vector & DCCP_ACKVEC_STATE_MASK) >> 6;
-		const u8 rl = *vector & DCCP_ACKVEC_LEN_MASK;
-
-		dccp_pr_debug_cat("%d,%d|", state, rl);
-		++vector;
-	}
-
-	dccp_pr_debug_cat("\n");
-}
-
-void dccp_ackvec_print(const struct dccp_ackvec *av)
-{
-	dccp_ackvector_print(av->av_buf_ackno,
-			     av->av_buf + av->av_buf_head,
-			     av->av_vec_len);
-}
-#endif
-
 static void dccp_ackvec_throw_record(struct dccp_ackvec *av,
 				     struct dccp_ackvec_record *avr)
 {
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index f813077234b7..a1929f33d703 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -159,8 +159,8 @@ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp)
 	} else if (ktime_us_delta(now, hctx->ccid3hctx_t_ld)
 				- (s64)hctx->ccid3hctx_rtt >= 0) {
 
-		hctx->ccid3hctx_x =
-		    max(min(2 * hctx->ccid3hctx_x, min_rate),
+		hctx->ccid3hctx_x = min(2 * hctx->ccid3hctx_x, min_rate);
+		hctx->ccid3hctx_x = max(hctx->ccid3hctx_x,
 			scaled_div(((__u64)hctx->ccid3hctx_s) << 6,
 				   hctx->ccid3hctx_rtt));
 		hctx->ccid3hctx_t_ld = now;
@@ -329,8 +329,14 @@ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb)
 			hctx->ccid3hctx_x = rfc3390_initial_rate(sk);
 			hctx->ccid3hctx_t_ld = now;
 		} else {
-			/* Sender does not have RTT sample: X_pps = 1 pkt/sec */
-			hctx->ccid3hctx_x = hctx->ccid3hctx_s;
+			/*
+			 * Sender does not have RTT sample:
+			 * - set fallback RTT (RFC 4340, 3.4) since a RTT value
+			 *   is needed in several parts (e.g. window counter);
+			 * - set sending rate X_pps = 1pps as per RFC 3448, 4.2.
+			 */
+			hctx->ccid3hctx_rtt = DCCP_FALLBACK_RTT;
+			hctx->ccid3hctx_x   = hctx->ccid3hctx_s;
 			hctx->ccid3hctx_x <<= 6;
 		}
 		ccid3_update_send_interval(hctx);
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index d1dfbb8de64c..97ecec0a8e76 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -14,14 +14,6 @@ module_param(tfrc_debug, bool, 0444);
 MODULE_PARM_DESC(tfrc_debug, "Enable debug messages");
 #endif
 
-extern int  tfrc_tx_packet_history_init(void);
-extern void tfrc_tx_packet_history_exit(void);
-extern int  tfrc_rx_packet_history_init(void);
-extern void tfrc_rx_packet_history_exit(void);
-
-extern int  tfrc_li_init(void);
-extern void tfrc_li_exit(void);
-
 static int __init tfrc_module_init(void)
 {
 	int rc = tfrc_li_init();
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index 1fb1187bbf1c..ed9857527acf 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -15,7 +15,7 @@
  * (at your option) any later version.
  */
 #include <linux/types.h>
-#include <asm/div64.h>
+#include <linux/math64.h>
 #include "../../dccp.h"
 /* internal includes that this module exports: */
 #include "loss_interval.h"
@@ -29,21 +29,19 @@ extern int tfrc_debug;
 #endif
 
 /* integer-arithmetic divisions of type (a * 1000000)/b */
-static inline u64 scaled_div(u64 a, u32 b)
+static inline u64 scaled_div(u64 a, u64 b)
 {
 	BUG_ON(b==0);
-	a *= 1000000;
-	do_div(a, b);
-	return a;
+	return div64_u64(a * 1000000, b);
 }
 
-static inline u32 scaled_div32(u64 a, u32 b)
+static inline u32 scaled_div32(u64 a, u64 b)
 {
 	u64 result = scaled_div(a, b);
 
 	if (result > UINT_MAX) {
-		DCCP_CRIT("Overflow: a(%llu)/b(%u) > ~0U",
-			  (unsigned long long)a, b);
+		DCCP_CRIT("Overflow: %llu/%llu > UINT_MAX",
+			  (unsigned long long)a, (unsigned long long)b);
 		return UINT_MAX;
 	}
 	return result;
@@ -58,7 +56,14 @@ static inline u32 tfrc_ewma(const u32 avg, const u32 newval, const u8 weight)
 	return avg ? (weight * avg + (10 - weight) * newval) / 10 : newval;
 }
 
 extern u32 tfrc_calc_x(u16 s, u32 R, u32 p);
 extern u32 tfrc_calc_x_reverse_lookup(u32 fvalue);
 
+extern int  tfrc_tx_packet_history_init(void);
+extern void tfrc_tx_packet_history_exit(void);
+extern int  tfrc_rx_packet_history_init(void);
+extern void tfrc_rx_packet_history_exit(void);
+
+extern int  tfrc_li_init(void);
+extern void tfrc_li_exit(void);
 #endif /* _TFRC_H_ */
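The following is an illustrative sketch, not part of the patch above: it models in stand-alone userspace C what the reworked scaled_div()/scaled_div32() helpers compute. The kernel versions use div64_u64() from <linux/math64.h>; the functions below only mirror their arithmetic, and the names are made up for the example.

#include <stdint.h>
#include <stdio.h>
#include <limits.h>

/* (a * 1000000) / b with a full 64-bit divisor, matching the new scaled_div();
 * the old helper truncated b to 32 bits via do_div(). */
static uint64_t model_scaled_div(uint64_t a, uint64_t b)
{
	return (a * 1000000ULL) / b;
}

/* scaled_div32() clamps to UINT_MAX (and warns) instead of silently truncating. */
static uint32_t model_scaled_div32(uint64_t a, uint64_t b)
{
	uint64_t result = model_scaled_div(a, b);

	return result > UINT_MAX ? UINT_MAX : (uint32_t)result;
}

int main(void)
{
	/* A divisor above 2^32 would have been mangled by the old 32-bit do_div(). */
	printf("%llu\n", (unsigned long long)model_scaled_div(1ULL << 33, 1ULL << 32)); /* 2000000 */
	printf("%u\n", model_scaled_div32(1ULL << 40, 3));                              /* clamped to 4294967295 */
	return 0;
}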
diff --git a/net/dccp/ccids/lib/tfrc_equation.c b/net/dccp/ccids/lib/tfrc_equation.c
index e4e64b76c10c..2f20a29cffe4 100644
--- a/net/dccp/ccids/lib/tfrc_equation.c
+++ b/net/dccp/ccids/lib/tfrc_equation.c
@@ -661,7 +661,7 @@ u32 tfrc_calc_x(u16 s, u32 R, u32 p)
 
 EXPORT_SYMBOL_GPL(tfrc_calc_x);
 
-/*
+/**
  *  tfrc_calc_x_reverse_lookup  -  try to find p given f(p)
  *
  *  @fvalue: function value to match, scaled by 1000000
@@ -676,11 +676,11 @@ u32 tfrc_calc_x_reverse_lookup(u32 fvalue)
 
 	/* Error cases. */
 	if (fvalue < tfrc_calc_x_lookup[0][1]) {
-		DCCP_WARN("fvalue %d smaller than resolution\n", fvalue);
-		return tfrc_calc_x_lookup[0][1];
+		DCCP_WARN("fvalue %u smaller than resolution\n", fvalue);
+		return TFRC_SMALLEST_P;
 	}
 	if (fvalue > tfrc_calc_x_lookup[TFRC_CALC_X_ARRSIZE - 1][0]) {
-		DCCP_WARN("fvalue %d exceeds bounds!\n", fvalue);
+		DCCP_WARN("fvalue %u exceeds bounds!\n", fvalue);
 		return 1000000;
 	}
 
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 33ad48321b08..66dca5bba858 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -165,12 +165,12 @@ out_free:
 	/* See dccp_v4_conn_request */
 	newdmsk->dccpms_sequence_window = req->rcv_wnd;
 
-	newdp->dccps_gar = newdp->dccps_isr = dreq->dreq_isr;
-	dccp_update_gsr(newsk, dreq->dreq_isr);
-
-	newdp->dccps_iss = dreq->dreq_iss;
+	newdp->dccps_gar = newdp->dccps_iss = dreq->dreq_iss;
 	dccp_update_gss(newsk, dreq->dreq_iss);
 
+	newdp->dccps_isr = dreq->dreq_isr;
+	dccp_update_gsr(newsk, dreq->dreq_isr);
+
 	/*
 	 * SWL and AWL are initially adjusted so that they are not less than
 	 * the initial Sequence Numbers received and sent, respectively:
diff --git a/net/dccp/options.c b/net/dccp/options.c
index d2a84a2fecee..43bc24e761d0 100644
--- a/net/dccp/options.c
+++ b/net/dccp/options.c
@@ -107,9 +107,11 @@ int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
 		 *
 		 * CCID-specific options are ignored during connection setup, as
 		 * negotiation may still be in progress (see RFC 4340, 10.3).
+		 * The same applies to Ack Vectors, as these depend on the CCID.
 		 *
 		 */
-		if (dreq != NULL && opt >= 128)
+		if (dreq != NULL && (opt >= 128 ||
+		    opt == DCCPO_ACK_VECTOR_0 || opt == DCCPO_ACK_VECTOR_1))
 			goto ignore_option;
 
 		switch (opt) {
diff --git a/net/dccp/output.c b/net/dccp/output.c
index 1f8a9b64c083..fe20068c5d8e 100644
--- a/net/dccp/output.c
+++ b/net/dccp/output.c
@@ -508,6 +508,7 @@ void dccp_send_ack(struct sock *sk)
 
 EXPORT_SYMBOL_GPL(dccp_send_ack);
 
+#if 0
 /* FIXME: Is this still necessary (11.3) - currently nowhere used by DCCP. */
 void dccp_send_delayed_ack(struct sock *sk)
 {
@@ -538,6 +539,7 @@ void dccp_send_delayed_ack(struct sock *sk)
 	icsk->icsk_ack.timeout = timeout;
 	sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
 }
+#endif
 
 void dccp_send_sync(struct sock *sk, const u64 ackno,
 		    const enum dccp_pkt_type pkt_type)
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 0bcdc9250279..81368a7f5379 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -42,7 +42,7 @@ static int bufsize = 64 * 1024;
 
 static const char procname[] = "dccpprobe";
 
-struct {
+static struct {
 	struct kfifo	  *fifo;
 	spinlock_t	  lock;
 	wait_queue_head_t wait;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 828ea211ff21..045e799d3e1d 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -419,7 +419,8 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 	struct inet_connection_sock *icsk = inet_csk(parent);
 	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
 	struct listen_sock *lopt = queue->listen_opt;
-	int thresh = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
+	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
+	int thresh = max_retries;
 	unsigned long now = jiffies;
 	struct request_sock **reqp, *req;
 	int i, budget;
@@ -455,6 +456,9 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 		}
 	}
 
+	if (queue->rskq_defer_accept)
+		max_retries = queue->rskq_defer_accept;
+
 	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
 	i = lopt->clock_hand;
 
@@ -462,8 +466,9 @@ void inet_csk_reqsk_queue_prune(struct sock *parent,
 		reqp=&lopt->syn_table[i];
 		while ((req = *reqp) != NULL) {
 			if (time_after_eq(now, req->expires)) {
-				if (req->retrans < thresh &&
-				    !req->rsk_ops->rtx_syn_ack(parent, req)) {
+				if ((req->retrans < (inet_rsk(req)->acked ? max_retries : thresh)) &&
+				    (inet_rsk(req)->acked ||
+				     !req->rsk_ops->rtx_syn_ack(parent, req))) {
 					unsigned long timeo;
 
 					if (req->retrans++ == 0)
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index ab66683b8043..fc54a48fde1e 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2112,12 +2112,15 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_DEFER_ACCEPT:
-		if (val < 0) {
-			err = -EINVAL;
-		} else {
-			if (val > MAX_TCP_ACCEPT_DEFERRED)
-				val = MAX_TCP_ACCEPT_DEFERRED;
-			icsk->icsk_accept_queue.rskq_defer_accept = val;
+		icsk->icsk_accept_queue.rskq_defer_accept = 0;
+		if (val > 0) {
+			/* Translate value in seconds to number of
+			 * retransmits */
+			while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
+			       val > ((TCP_TIMEOUT_INIT / HZ) <<
+				       icsk->icsk_accept_queue.rskq_defer_accept))
+				icsk->icsk_accept_queue.rskq_defer_accept++;
+			icsk->icsk_accept_queue.rskq_defer_accept++;
 		}
 		break;
 
@@ -2299,7 +2302,8 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		val = (val ? : sysctl_tcp_fin_timeout) / HZ;
 		break;
 	case TCP_DEFER_ACCEPT:
-		val = icsk->icsk_accept_queue.rskq_defer_accept;
+		val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
+		      ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
 		break;
 	case TCP_WINDOW_CLAMP:
 		val = tp->window_clamp;
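The following is an illustrative sketch, not part of the patch above: it models the seconds-to-retransmits translation that the TCP_DEFER_ACCEPT setsockopt path now performs, as a stand-alone function. It assumes TCP_TIMEOUT_INIT/HZ == 3, the initial SYNACK retransmission timeout of this kernel series; the function name is made up for the example.

#include <stdio.h>

static unsigned char defer_accept_retrans(int seconds)
{
	unsigned char retrans = 0;

	if (seconds <= 0)
		return 0;
	/* Keep doubling the 3s base timeout until it covers the requested time. */
	while (retrans < 32 && seconds > (3 << retrans))
		retrans++;
	return retrans + 1;
}

int main(void)
{
	/* 10 seconds maps to 3 retransmits; getsockopt() reports it back as
	 * 3 << (3 - 1) = 12 seconds, the time actually covered. */
	printf("%u\n", defer_accept_retrans(10));
	return 0;
}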
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index eba873e9b560..cad73b7dfef0 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4541,49 +4541,6 @@ static void tcp_urg(struct sock *sk, struct sk_buff *skb, struct tcphdr *th)
 	}
 }
 
-static int tcp_defer_accept_check(struct sock *sk)
-{
-	struct tcp_sock *tp = tcp_sk(sk);
-
-	if (tp->defer_tcp_accept.request) {
-		int queued_data = tp->rcv_nxt - tp->copied_seq;
-		int hasfin = !skb_queue_empty(&sk->sk_receive_queue) ?
-			tcp_hdr((struct sk_buff *)
-				sk->sk_receive_queue.prev)->fin : 0;
-
-		if (queued_data && hasfin)
-			queued_data--;
-
-		if (queued_data &&
-		    tp->defer_tcp_accept.listen_sk->sk_state == TCP_LISTEN) {
-			if (sock_flag(sk, SOCK_KEEPOPEN)) {
-				inet_csk_reset_keepalive_timer(sk,
-						keepalive_time_when(tp));
-			} else {
-				inet_csk_delete_keepalive_timer(sk);
-			}
-
-			inet_csk_reqsk_queue_add(
-					tp->defer_tcp_accept.listen_sk,
-					tp->defer_tcp_accept.request,
-					sk);
-
-			tp->defer_tcp_accept.listen_sk->sk_data_ready(
-					tp->defer_tcp_accept.listen_sk, 0);
-
-			sock_put(tp->defer_tcp_accept.listen_sk);
-			sock_put(sk);
-			tp->defer_tcp_accept.listen_sk = NULL;
-			tp->defer_tcp_accept.request = NULL;
-		} else if (hasfin ||
-			   tp->defer_tcp_accept.listen_sk->sk_state != TCP_LISTEN) {
-			tcp_reset(sk);
-			return -1;
-		}
-	}
-	return 0;
-}
-
 static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
@@ -4944,8 +4901,6 @@ step5:
 
 	tcp_data_snd_check(sk);
 	tcp_ack_snd_check(sk);
-
-	tcp_defer_accept_check(sk);
 	return 0;
 
 csum_error:
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 4f8485c67d1a..97a230026e13 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1918,14 +1918,6 @@ int tcp_v4_destroy_sock(struct sock *sk)
 		sk->sk_sndmsg_page = NULL;
 	}
 
-	if (tp->defer_tcp_accept.request) {
-		reqsk_free(tp->defer_tcp_accept.request);
-		sock_put(tp->defer_tcp_accept.listen_sk);
-		sock_put(sk);
-		tp->defer_tcp_accept.listen_sk = NULL;
-		tp->defer_tcp_accept.request = NULL;
-	}
-
 	atomic_dec(&tcp_sockets_allocated);
 
 	return 0;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 019c8c16e5cc..8245247a6ceb 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -571,8 +571,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	   does sequence test, SYN is truncated, and thus we consider
 	   it a bare ACK.
 
-	   Both ends (listening sockets) accept the new incoming
-	   connection and try to talk to each other. 8-)
+	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
+	   bare ACK.  Otherwise, we create an established connection.  Both
+	   ends (listening sockets) accept the new incoming connection and try
+	   to talk to each other. 8-)
 
 	   Note: This case is both harmless, and rare.  Possibility is about the
 	   same as us discovering intelligent life on another plant tomorrow.
@@ -640,6 +642,13 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	if (!(flg & TCP_FLAG_ACK))
 		return NULL;
 
+	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
+	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
+	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
+		inet_rsk(req)->acked = 1;
+		return NULL;
+	}
+
 	/* OK, ACK is valid, create big socket and
 	 * feed this segment to it. It will repeat all
 	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
@@ -678,24 +687,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	inet_csk_reqsk_queue_unlink(sk, req, prev);
 	inet_csk_reqsk_queue_removed(sk, req);
 
-	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
-	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
-
-		/* the accept queue handling is done is est recv slow
-		 * path so lets make sure to start there
-		 */
-		tcp_sk(child)->pred_flags = 0;
-		sock_hold(sk);
-		sock_hold(child);
-		tcp_sk(child)->defer_tcp_accept.listen_sk = sk;
-		tcp_sk(child)->defer_tcp_accept.request = req;
-
-		inet_csk_reset_keepalive_timer(child,
-				inet_csk(sk)->icsk_accept_queue.rskq_defer_accept * HZ);
-	} else {
-		inet_csk_reqsk_queue_add(sk, req, child);
-	}
+	inet_csk_reqsk_queue_add(sk, req, child);
 
 	return child;
 
 listen_overflow:
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 4de68cf5f2aa..63ed9d6830e7 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -489,11 +489,6 @@ static void tcp_keepalive_timer (unsigned long data)
 		goto death;
 	}
 
-	if (tp->defer_tcp_accept.request && sk->sk_state == TCP_ESTABLISHED) {
-		tcp_send_active_reset(sk, GFP_ATOMIC);
-		goto death;
-	}
-
 	if (!sock_flag(sk, SOCK_KEEPOPEN) || sk->sk_state == TCP_CLOSE)
 		goto out;
 
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index b9c2de84a8a2..0f0f94a40335 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -705,6 +705,11 @@ int datagram_send_ctl(struct net *net,
 			}
 
 			*hlimit = *(int *)CMSG_DATA(cmsg);
+			if (*hlimit < -1 || *hlimit > 0xff) {
+				err = -EINVAL;
+				goto exit_f;
+			}
+
 			break;
 
 		case IPV6_TCLASS:
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 26b83e512a09..c042ce19bd14 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -67,7 +67,7 @@ int ip6_ra_control(struct sock *sk, int sel, void (*destructor)(struct sock *))
 
 	/* RA packet may be delivered ONLY to IPPROTO_RAW socket */
 	if (sk->sk_type != SOCK_RAW || inet_sk(sk)->num != IPPROTO_RAW)
-		return -EINVAL;
+		return -ENOPROTOOPT;
 
 	new_ra = (sel>=0) ? kmalloc(sizeof(*new_ra), GFP_KERNEL) : NULL;
 
@@ -446,7 +446,7 @@ done:
 
 	case IPV6_MULTICAST_HOPS:
 		if (sk->sk_type == SOCK_STREAM)
-			goto e_inval;
+			break;
 		if (optlen < sizeof(int))
 			goto e_inval;
 		if (val > 255 || val < -1)
@@ -458,13 +458,15 @@ done:
 	case IPV6_MULTICAST_LOOP:
 		if (optlen < sizeof(int))
 			goto e_inval;
+		if (val != valbool)
+			goto e_inval;
 		np->mc_loop = valbool;
 		retv = 0;
 		break;
 
 	case IPV6_MULTICAST_IF:
 		if (sk->sk_type == SOCK_STREAM)
-			goto e_inval;
+			break;
 		if (optlen < sizeof(int))
 			goto e_inval;
 
@@ -860,7 +862,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 		if (sk->sk_protocol != IPPROTO_UDP &&
 		    sk->sk_protocol != IPPROTO_UDPLITE &&
 		    sk->sk_protocol != IPPROTO_TCP)
-			return -EINVAL;
+			return -ENOPROTOOPT;
 		if (sk->sk_state != TCP_ESTABLISHED)
 			return -ENOTCONN;
 		val = sk->sk_family;
@@ -874,6 +876,8 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
 			return -EINVAL;
 		if (copy_from_user(&gsf, optval, GROUP_FILTER_SIZE(0)))
 			return -EFAULT;
+		if (gsf.gf_group.ss_family != AF_INET6)
+			return -EADDRNOTAVAIL;
 		lock_sock(sk);
 		err = ip6_mc_msfget(sk, &gsf,
 			(struct group_filter __user *)optval, optlen);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 8fee9a15b2d3..3aee12310d94 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1169,7 +1169,8 @@ static int raw6_destroy(struct sock *sk)
 	lock_sock(sk);
 	ip6_flush_pending_frames(sk);
 	release_sock(sk);
-	return 0;
+
+	return inet6_destroy_sock(sk);
 }
 
 static int rawv6_init_sk(struct sock *sk)
@@ -1200,7 +1201,6 @@ struct proto rawv6_prot = {
 	.disconnect	   = udp_disconnect,
 	.ioctl		   = rawv6_ioctl,
 	.init		   = rawv6_init_sk,
-	.destroy	   = inet6_destroy_sock,
 	.setsockopt	   = rawv6_setsockopt,
 	.getsockopt	   = rawv6_getsockopt,
 	.sendmsg	   = rawv6_sendmsg,
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 220cffe9e63b..d1f3e19b06c7 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2196,8 +2196,12 @@ static int rt6_fill_node(struct sk_buff *skb, struct rt6_info *rt,
 
 	NLA_PUT_U32(skb, RTA_PRIORITY, rt->rt6i_metric);
 
-	expires = (rt->rt6i_flags & RTF_EXPIRES) ?
-			rt->rt6i_expires - jiffies : 0;
+	if (!(rt->rt6i_flags & RTF_EXPIRES))
+		expires = 0;
+	else if (rt->rt6i_expires - jiffies < INT_MAX)
+		expires = rt->rt6i_expires - jiffies;
+	else
+		expires = INT_MAX;
 
 	if (rtnl_put_cacheinfo(skb, &rt->u.dst, 0, 0, 0,
 			       expires, rt->u.dst.error) < 0)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index c4b1799da5d7..662c1ccfee26 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -196,8 +196,6 @@ destroy_conntrack(struct nf_conntrack *nfct)
 	if (l4proto && l4proto->destroy)
 		l4proto->destroy(ct);
 
-	nf_ct_ext_destroy(ct);
-
 	rcu_read_unlock();
 
 	spin_lock_bh(&nf_conntrack_lock);
@@ -520,6 +518,7 @@ static void nf_conntrack_free_rcu(struct rcu_head *head)
 
 void nf_conntrack_free(struct nf_conn *ct)
 {
+	nf_ct_ext_destroy(ct);
 	call_rcu(&ct->rcu, nf_conntrack_free_rcu);
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_free);
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index bc11d7092032..9fda6ee95a31 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -92,10 +92,6 @@ void nf_log_packet(int pf,
 		vsnprintf(prefix, sizeof(prefix), fmt, args);
 		va_end(args);
 		logger->logfn(pf, hooknum, skb, in, out, loginfo, prefix);
-	} else if (net_ratelimit()) {
-		printk(KERN_WARNING "nf_log_packet: can\'t log since "
-		       "no backend logging module loaded in! Please either "
-		       "load one, or disable logging explicitly\n");
 	}
 	rcu_read_unlock();
 }
