author     David S. Miller <davem@davemloft.net>   2014-08-05 21:46:26 -0400
committer  David S. Miller <davem@davemloft.net>   2014-08-05 21:46:26 -0400
commit     d247b6ab3ce6dd43665780865ec5fa145d9ab6bd (patch)
tree       02eb71e4d64b678d7568d2b99f309e08f56ef2fe
parent     30f00847953e3aa3f710d62ffd37b42042807900 (diff)
parent     4d8fdc95c60e90d84c8257a0067ff4b1729a3757 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
	drivers/net/Makefile
	net/ipv6/sysctl_net_ipv6.c

Two ipv6_table_template[] additions overlap, so the index of the
ipv6_table[x] assignments needed to be adjusted.

In the drivers/net/Makefile case, we've gotten rid of the garbage
whereby we had to list every single USB networking driver in the
top-level Makefile; there is just one "USB_NETWORKING" that guards
everything.

Signed-off-by: David S. Miller <davem@davemloft.net>
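As a concrete illustration of the net/ipv6/sysctl_net_ipv6.c resolution described above: once both branches' ipv6_table_template[] entries are present, the per-netns data hookups in ipv6_sysctl_net_init() must be renumbered so the indices stay consecutive. A minimal sketch of the resulting pattern follows; the helper name is hypothetical, and only the index assignments that also appear in the sysctl hunk later in this diff are taken from the tree.

/* Sketch only: shows why the ipv6_table[x] indices had to be adjusted.
 * Each entry appended to ipv6_table_template[] shifts the slot used when
 * wiring up the per-netns data pointer, so the entry added by the second
 * branch lands at index 4 after the merge.
 */
static void ipv6_sysctl_hookup_sketch(struct net *net, struct ctl_table *ipv6_table)
{
	ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
	ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
	ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
	ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;	/* re-indexed in the merge */
}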
-rw-r--r--  drivers/net/bonding/bond_sysfs_slave.c                4
-rw-r--r--  drivers/net/ethernet/allwinner/sun4i-emac.c           6
-rw-r--r--  drivers/net/ethernet/broadcom/tg3.c                  22
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c               2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad_ethtool.c       6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c       2
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c  18
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c     10
-rw-r--r--  drivers/net/macvlan.c                                  1
-rw-r--r--  drivers/net/phy/mdio_bus.c                             1
-rw-r--r--  drivers/net/usb/cdc_subset.c                          27
-rw-r--r--  drivers/net/usb/usbnet.c                               8
-rw-r--r--  drivers/net/xen-netfront.c                            74
-rw-r--r--  include/linux/usb/usbnet.h                             3
-rw-r--r--  include/net/ip_tunnels.h                               1
-rw-r--r--  lib/iovec.c                                            4
-rw-r--r--  net/batman-adv/fragmentation.c                        10
-rw-r--r--  net/bridge/br_fdb.c                                    2
-rw-r--r--  net/core/skbuff.c                                      2
-rw-r--r--  net/ipv4/ip_tunnel.c                                  29
-rw-r--r--  net/ipv4/tcp_vegas.c                                   3
-rw-r--r--  net/ipv4/tcp_veno.c                                    2
-rw-r--r--  net/ipv6/sysctl_net_ipv6.c                             1
-rw-r--r--  net/netfilter/ipvs/ip_vs_xmit.c                        2
-rw-r--r--  net/netfilter/nf_tables_api.c                          1
-rw-r--r--  net/netfilter/xt_LED.c                                10
-rw-r--r--  net/sctp/output.c                                      2
27 files changed, 132 insertions(+), 121 deletions(-)
diff --git a/drivers/net/bonding/bond_sysfs_slave.c b/drivers/net/bonding/bond_sysfs_slave.c
index 198677f58ce0..5cd532ca1cfe 100644
--- a/drivers/net/bonding/bond_sysfs_slave.c
+++ b/drivers/net/bonding/bond_sysfs_slave.c
@@ -125,7 +125,7 @@ int bond_sysfs_slave_add(struct slave *slave)
 	for (a = slave_attrs; *a; ++a) {
 		err = sysfs_create_file(&slave->kobj, &((*a)->attr));
 		if (err) {
-			kobject_del(&slave->kobj);
+			kobject_put(&slave->kobj);
 			return err;
 		}
 	}
@@ -140,5 +140,5 @@ void bond_sysfs_slave_del(struct slave *slave)
 	for (a = slave_attrs; *a; ++a)
 		sysfs_remove_file(&slave->kobj, &((*a)->attr));
 
-	kobject_del(&slave->kobj);
+	kobject_put(&slave->kobj);
 }
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index d81e7167a8b5..29b9f082475d 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -633,8 +633,10 @@ static void emac_rx(struct net_device *dev)
 		}
 
 		/* Move data from EMAC */
-		skb = dev_alloc_skb(rxlen + 4);
-		if (good_packet && skb) {
+		if (good_packet) {
+			skb = netdev_alloc_skb(dev, rxlen + 4);
+			if (!skb)
+				continue;
 			skb_reserve(skb, 2);
 			rdptr = (u8 *) skb_put(skb, rxlen - 4);
 
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 8afa579e7c40..a3dd5dc64f4c 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7830,17 +7830,18 @@ static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
 
 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
 
-/* Use GSO to workaround a rare TSO bug that may be triggered when the
- * TSO header is greater than 80 bytes.
+/* Use GSO to workaround all TSO packets that meet HW bug conditions
+ * indicated in tg3_tx_frag_set()
  */
-static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
+static int tg3_tso_bug(struct tg3 *tp, struct tg3_napi *tnapi,
+		       struct netdev_queue *txq, struct sk_buff *skb)
 {
 	struct sk_buff *segs, *nskb;
 	u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
 
 	/* Estimate the number of fragments in the worst case */
-	if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
-		netif_stop_queue(tp->dev);
+	if (unlikely(tg3_tx_avail(tnapi) <= frag_cnt_est)) {
+		netif_tx_stop_queue(txq);
 
 		/* netif_tx_stop_queue() must be done before checking
 		 * checking tx index in tg3_tx_avail() below, because in
@@ -7848,13 +7849,14 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
 		 * netif_tx_queue_stopped().
 		 */
 		smp_mb();
-		if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
+		if (tg3_tx_avail(tnapi) <= frag_cnt_est)
 			return NETDEV_TX_BUSY;
 
-		netif_wake_queue(tp->dev);
+		netif_tx_wake_queue(txq);
 	}
 
-	segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6));
+	segs = skb_gso_segment(skb, tp->dev->features &
+			       ~(NETIF_F_TSO | NETIF_F_TSO6));
 	if (IS_ERR(segs) || !segs)
 		goto tg3_tso_bug_end;
 
@@ -7930,7 +7932,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 		if (!skb_is_gso_v6(skb)) {
 			if (unlikely((ETH_HLEN + hdr_len) > 80) &&
 			    tg3_flag(tp, TSO_BUG))
-				return tg3_tso_bug(tp, skb);
+				return tg3_tso_bug(tp, tnapi, txq, skb);
 
 			ip_csum = iph->check;
 			ip_tot_len = iph->tot_len;
@@ -8061,7 +8063,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
 				iph->tot_len = ip_tot_len;
 			}
 			tcph->check = tcp_csum;
-			return tg3_tso_bug(tp, skb);
+			return tg3_tso_bug(tp, tnapi, txq, skb);
 		}
 
 		/* If the workaround fails due to memory/mapping
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 3a77f9ead004..556aab75f490 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -600,9 +600,9 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	prefetch(bnad->netdev);
 
 	cq = ccb->sw_q;
-	cmpl = &cq[ccb->producer_index];
 
 	while (packets < budget) {
+		cmpl = &cq[ccb->producer_index];
 		if (!cmpl->valid)
 			break;
 		/* The 'valid' field is set by the adapter, only after writing
diff --git a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
index 882cad71ad62..d26adac6ab99 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_ethtool.c
@@ -997,10 +997,8 @@ bnad_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom,
 	unsigned long flags = 0;
 	int ret = 0;
 
-	/* Check if the flash read request is valid */
-	if (eeprom->magic != (bnad->pcidev->vendor |
-			      (bnad->pcidev->device << 16)))
-		return -EFAULT;
+	/* Fill the magic value */
+	eeprom->magic = bnad->pcidev->vendor | (bnad->pcidev->device << 16);
 
 	/* Query the flash partition based on the offset */
 	flash_part = bnad_get_flash_partition_by_offset(bnad,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
index 304e247bdf33..ffbae293cef5 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ctx.c
@@ -136,7 +136,7 @@ int qlcnic_82xx_issue_cmd(struct qlcnic_adapter *adapter,
 	rsp = qlcnic_poll_rsp(adapter);
 
 	if (rsp == QLCNIC_CDRP_RSP_TIMEOUT) {
-		dev_err(&pdev->dev, "card response timeout.\n");
+		dev_err(&pdev->dev, "command timeout, response = 0x%x\n", rsp);
 		cmd->rsp.arg[0] = QLCNIC_RCODE_TIMEOUT;
 	} else if (rsp == QLCNIC_CDRP_RSP_FAIL) {
 		cmd->rsp.arg[0] = QLCRD32(adapter, QLCNIC_CDRP_ARG(1), &err);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index 1b7f3dbae289..141f116eb868 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -1290,17 +1290,25 @@ static u64 *qlcnic_fill_stats(u64 *data, void *stats, int type)
 
 void qlcnic_update_stats(struct qlcnic_adapter *adapter)
 {
+	struct qlcnic_tx_queue_stats tx_stats;
 	struct qlcnic_host_tx_ring *tx_ring;
 	int ring;
 
+	memset(&tx_stats, 0, sizeof(tx_stats));
 	for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
 		tx_ring = &adapter->tx_ring[ring];
-		adapter->stats.xmit_on += tx_ring->tx_stats.xmit_on;
-		adapter->stats.xmit_off += tx_ring->tx_stats.xmit_off;
-		adapter->stats.xmitcalled += tx_ring->tx_stats.xmit_called;
-		adapter->stats.xmitfinished += tx_ring->tx_stats.xmit_finished;
-		adapter->stats.txbytes += tx_ring->tx_stats.tx_bytes;
+		tx_stats.xmit_on += tx_ring->tx_stats.xmit_on;
+		tx_stats.xmit_off += tx_ring->tx_stats.xmit_off;
+		tx_stats.xmit_called += tx_ring->tx_stats.xmit_called;
+		tx_stats.xmit_finished += tx_ring->tx_stats.xmit_finished;
+		tx_stats.tx_bytes += tx_ring->tx_stats.tx_bytes;
 	}
+
+	adapter->stats.xmit_on = tx_stats.xmit_on;
+	adapter->stats.xmit_off = tx_stats.xmit_off;
+	adapter->stats.xmitcalled = tx_stats.xmit_called;
+	adapter->stats.xmitfinished = tx_stats.xmit_finished;
+	adapter->stats.txbytes = tx_stats.tx_bytes;
 }
 
 static u64 *qlcnic_fill_tx_queue_stats(u64 *data, void *stats)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 0fdbcc8319f7..59846daf1379 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -2324,14 +2324,14 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter, struct net_device *netdev,
 	if (err)
 		return err;
 
+	qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
+
 	err = register_netdev(netdev);
 	if (err) {
 		dev_err(&pdev->dev, "failed to register net device\n");
 		return err;
 	}
 
-	qlcnic_dcb_init_dcbnl_ops(adapter->dcb);
-
 	return 0;
 }
 
@@ -2624,13 +2624,13 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	if (err)
 		goto err_out_disable_mbx_intr;
 
+	if (adapter->portnum == 0)
+		qlcnic_set_drv_version(adapter);
+
 	err = qlcnic_setup_netdev(adapter, netdev, pci_using_dac);
 	if (err)
 		goto err_out_disable_mbx_intr;
 
-	if (adapter->portnum == 0)
-		qlcnic_set_drv_version(adapter);
-
 	pci_set_drvdata(pdev, adapter);
 
 	if (qlcnic_82xx_check(adapter))
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 958df383068a..ef8a5c20236a 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -646,6 +646,7 @@ static int macvlan_init(struct net_device *dev)
 				  (lowerdev->state & MACVLAN_STATE_MASK);
 	dev->features		= lowerdev->features & MACVLAN_FEATURES;
 	dev->features		|= ALWAYS_ON_FEATURES;
+	dev->vlan_features	= lowerdev->vlan_features & MACVLAN_FEATURES;
 	dev->gso_max_size	= lowerdev->gso_max_size;
 	dev->iflink		= lowerdev->ifindex;
 	dev->hard_header_len	= lowerdev->hard_header_len;
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 203651ebccb0..4eaadcfcb0fe 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -255,7 +255,6 @@ int mdiobus_register(struct mii_bus *bus)
 
 	bus->dev.parent = bus->parent;
 	bus->dev.class = &mdio_bus_class;
-	bus->dev.driver = bus->parent->driver;
 	bus->dev.groups = NULL;
 	dev_set_name(&bus->dev, "%s", bus->id);
 
diff --git a/drivers/net/usb/cdc_subset.c b/drivers/net/usb/cdc_subset.c
index 91f0919fe278..6ea98cff2d3b 100644
--- a/drivers/net/usb/cdc_subset.c
+++ b/drivers/net/usb/cdc_subset.c
@@ -85,14 +85,28 @@ static int always_connected (struct usbnet *dev)
  *
  *-------------------------------------------------------------------------*/
 
+static void m5632_recover(struct usbnet *dev)
+{
+	struct usb_device *udev = dev->udev;
+	struct usb_interface *intf = dev->intf;
+	int r;
+
+	r = usb_lock_device_for_reset(udev, intf);
+	if (r < 0)
+		return;
+
+	usb_reset_device(udev);
+	usb_unlock_device(udev);
+}
+
 static const struct driver_info	ali_m5632_info = {
 	.description =	"ALi M5632",
 	.flags       = FLAG_POINTTOPOINT,
+	.recover     = m5632_recover,
 };
 
 #endif
 
-
 #ifdef	CONFIG_USB_AN2720
 #define	HAVE_HARDWARE
 
@@ -326,12 +340,23 @@ static const struct usb_device_id products [] = {
 MODULE_DEVICE_TABLE(usb, products);
 
 /*-------------------------------------------------------------------------*/
+static int dummy_prereset(struct usb_interface *intf)
+{
+	return 0;
+}
+
+static int dummy_postreset(struct usb_interface *intf)
+{
+	return 0;
+}
 
 static struct usb_driver cdc_subset_driver = {
 	.name		= "cdc_subset",
 	.probe		= usbnet_probe,
 	.suspend	= usbnet_suspend,
 	.resume		= usbnet_resume,
+	.pre_reset	= dummy_prereset,
+	.post_reset	= dummy_postreset,
 	.disconnect	= usbnet_disconnect,
 	.id_table	= products,
 	.disable_hub_initiated_lpm = 1,
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index f9e96c427558..5173821a9575 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1218,8 +1218,12 @@ void usbnet_tx_timeout (struct net_device *net)
 
 	unlink_urbs (dev, &dev->txq);
 	tasklet_schedule (&dev->bh);
-
-	// FIXME: device recovery -- reset?
+	/* this needs to be handled individually because the generic layer
+	 * doesn't know what is sufficient and could not restore private
+	 * information if a remedy of an unconditional reset were used.
+	 */
+	if (dev->driver_info->recover)
+		(dev->driver_info->recover)(dev);
 }
 EXPORT_SYMBOL_GPL(usbnet_tx_timeout);
 
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 055222bae6e4..28204bc4f369 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1196,22 +1196,6 @@ static void xennet_release_rx_bufs(struct netfront_queue *queue)
 	spin_unlock_bh(&queue->rx_lock);
 }
 
-static void xennet_uninit(struct net_device *dev)
-{
-	struct netfront_info *np = netdev_priv(dev);
-	unsigned int num_queues = dev->real_num_tx_queues;
-	struct netfront_queue *queue;
-	unsigned int i;
-
-	for (i = 0; i < num_queues; ++i) {
-		queue = &np->queues[i];
-		xennet_release_tx_bufs(queue);
-		xennet_release_rx_bufs(queue);
-		gnttab_free_grant_references(queue->gref_tx_head);
-		gnttab_free_grant_references(queue->gref_rx_head);
-	}
-}
-
 static netdev_features_t xennet_fix_features(struct net_device *dev,
 	netdev_features_t features)
 {
@@ -1313,7 +1297,6 @@ static void xennet_poll_controller(struct net_device *dev)
 
 static const struct net_device_ops xennet_netdev_ops = {
 	.ndo_open            = xennet_open,
-	.ndo_uninit          = xennet_uninit,
 	.ndo_stop            = xennet_close,
 	.ndo_start_xmit      = xennet_start_xmit,
 	.ndo_change_mtu	     = xennet_change_mtu,
@@ -1455,6 +1438,11 @@ static void xennet_disconnect_backend(struct netfront_info *info)
 
 		napi_synchronize(&queue->napi);
 
+		xennet_release_tx_bufs(queue);
+		xennet_release_rx_bufs(queue);
+		gnttab_free_grant_references(queue->gref_tx_head);
+		gnttab_free_grant_references(queue->gref_rx_head);
+
 		/* End access and free the pages */
 		xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
 		xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -1827,8 +1815,8 @@ static int xennet_create_queues(struct netfront_info *info,
 
 		ret = xennet_init_queue(queue);
 		if (ret < 0) {
-			dev_warn(&info->netdev->dev, "only created %d queues\n",
-				 num_queues);
+			dev_warn(&info->netdev->dev,
+				 "only created %d queues\n", i);
 			num_queues = i;
 			break;
 		}
@@ -2001,7 +1989,7 @@ abort_transaction_no_dev_fatal:
 	info->queues = NULL;
 	rtnl_lock();
 	netif_set_real_num_tx_queues(info->netdev, 0);
-	rtnl_lock();
+	rtnl_unlock();
  out:
 	return err;
 }
@@ -2010,10 +1998,7 @@ static int xennet_connect(struct net_device *dev)
 {
 	struct netfront_info *np = netdev_priv(dev);
 	unsigned int num_queues = 0;
-	int i, requeue_idx, err;
-	struct sk_buff *skb;
-	grant_ref_t ref;
-	struct xen_netif_rx_request *req;
+	int err;
 	unsigned int feature_rx_copy;
 	unsigned int j = 0;
 	struct netfront_queue *queue = NULL;
@@ -2040,47 +2025,8 @@ static int xennet_connect(struct net_device *dev)
 	netdev_update_features(dev);
 	rtnl_unlock();
 
-	/* By now, the queue structures have been set up */
-	for (j = 0; j < num_queues; ++j) {
-		queue = &np->queues[j];
-
-		/* Step 1: Discard all pending TX packet fragments. */
-		spin_lock_irq(&queue->tx_lock);
-		xennet_release_tx_bufs(queue);
-		spin_unlock_irq(&queue->tx_lock);
-
-		/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
-		spin_lock_bh(&queue->rx_lock);
-
-		for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
-			skb_frag_t *frag;
-			const struct page *page;
-			if (!queue->rx_skbs[i])
-				continue;
-
-			skb = queue->rx_skbs[requeue_idx] = xennet_get_rx_skb(queue, i);
-			ref = queue->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(queue, i);
-			req = RING_GET_REQUEST(&queue->rx, requeue_idx);
-
-			frag = &skb_shinfo(skb)->frags[0];
-			page = skb_frag_page(frag);
-			gnttab_grant_foreign_access_ref(
-				ref, queue->info->xbdev->otherend_id,
-				pfn_to_mfn(page_to_pfn(page)),
-				0);
-			req->gref = ref;
-			req->id   = requeue_idx;
-
-			requeue_idx++;
-		}
-
-		queue->rx.req_prod_pvt = requeue_idx;
-
-		spin_unlock_bh(&queue->rx_lock);
-	}
-
 	/*
-	 * Step 3: All public and private state should now be sane.  Get
+	 * All public and private state should now be sane.  Get
 	 * ready to start sending and receiving packets and give the driver
 	 * domain a kick because we've probably just requeued some
 	 * packets.
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 0662e98fef72..26088feb6608 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -148,6 +148,9 @@ struct driver_info {
 	struct sk_buff	*(*tx_fixup)(struct usbnet *dev,
 				struct sk_buff *skb, gfp_t flags);
 
+	/* recover from timeout */
+	void	(*recover)(struct usbnet *dev);
+
 	/* early initialization code, can sleep. This is for minidrivers
 	 * having 'subminidrivers' that need to do extra initialization
 	 * right after minidriver have initialized hardware. */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index a4daf9eb8562..8dd8cab88b87 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -40,6 +40,7 @@ struct ip_tunnel_prl_entry {
 
 struct ip_tunnel_dst {
 	struct dst_entry __rcu		*dst;
+	__be32				saddr;
 };
 
 struct ip_tunnel {
diff --git a/lib/iovec.c b/lib/iovec.c
index 7a7c2da4cddf..df3abd1eaa4a 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -85,6 +85,10 @@ EXPORT_SYMBOL(memcpy_toiovecend);
 int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
 			int offset, int len)
 {
+	/* No data? Done! */
+	if (len == 0)
+		return 0;
+
 	/* Skip over the finished iovecs */
 	while (offset >= iov->iov_len) {
 		offset -= iov->iov_len;
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index f14e54a05691..022d18ab27a6 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -128,6 +128,7 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 {
 	struct batadv_frag_table_entry *chain;
 	struct batadv_frag_list_entry *frag_entry_new = NULL, *frag_entry_curr;
+	struct batadv_frag_list_entry *frag_entry_last = NULL;
 	struct batadv_frag_packet *frag_packet;
 	uint8_t bucket;
 	uint16_t seqno, hdr_size = sizeof(struct batadv_frag_packet);
@@ -180,11 +181,14 @@ static bool batadv_frag_insert_packet(struct batadv_orig_node *orig_node,
 			ret = true;
 			goto out;
 		}
+
+		/* store current entry because it could be the last in list */
+		frag_entry_last = frag_entry_curr;
 	}
 
-	/* Reached the end of the list, so insert after 'frag_entry_curr'. */
-	if (likely(frag_entry_curr)) {
-		hlist_add_after(&frag_entry_curr->list, &frag_entry_new->list);
+	/* Reached the end of the list, so insert after 'frag_entry_last'. */
+	if (likely(frag_entry_last)) {
+		hlist_add_after(&frag_entry_last->list, &frag_entry_new->list);
 		chain->size += skb->len - hdr_size;
 		chain->timestamp = jiffies;
 		ret = true;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index 0bb9d8b63dd2..6f6c95cfe8f2 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -629,7 +629,7 @@ static int fdb_fill_info(struct sk_buff *skb, const struct net_bridge *br,
 	if (nla_put(skb, NDA_CACHEINFO, sizeof(ci), &ci))
 		goto nla_put_failure;
 
-	if (nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
+	if (fdb->vlan_id && nla_put(skb, NDA_VLAN, sizeof(u16), &fdb->vlan_id))
 		goto nla_put_failure;
 
 	return nlmsg_end(skb, nlh);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 3dec0293a7c5..224506a6fa80 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2976,9 +2976,9 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
 		tail = nskb;
 
 		__copy_skb_header(nskb, head_skb);
-		nskb->mac_len = head_skb->mac_len;
 
 		skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
+		skb_reset_mac_len(nskb);
 
 		skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
 						 nskb->data - tnl_hlen,
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index dd8c8c765799..afed1aac2638 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -69,23 +69,25 @@ static unsigned int ip_tunnel_hash(__be32 key, __be32 remote)
 }
 
 static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
-			     struct dst_entry *dst)
+			     struct dst_entry *dst, __be32 saddr)
 {
 	struct dst_entry *old_dst;
 
 	dst_clone(dst);
 	old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
 	dst_release(old_dst);
+	idst->saddr = saddr;
 }
 
-static void tunnel_dst_set(struct ip_tunnel *t, struct dst_entry *dst)
+static void tunnel_dst_set(struct ip_tunnel *t,
+			   struct dst_entry *dst, __be32 saddr)
 {
-	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst);
+	__tunnel_dst_set(this_cpu_ptr(t->dst_cache), dst, saddr);
 }
 
 static void tunnel_dst_reset(struct ip_tunnel *t)
 {
-	tunnel_dst_set(t, NULL);
+	tunnel_dst_set(t, NULL, 0);
 }
 
 void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
@@ -93,20 +95,25 @@ void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
 	int i;
 
 	for_each_possible_cpu(i)
-		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
+		__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL, 0);
 }
 EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
 
-static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
+static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
+					u32 cookie, __be32 *saddr)
 {
+	struct ip_tunnel_dst *idst;
 	struct dst_entry *dst;
 
 	rcu_read_lock();
-	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
+	idst = this_cpu_ptr(t->dst_cache);
+	dst = rcu_dereference(idst->dst);
 	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
 		dst = NULL;
 	if (dst) {
-		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+		if (!dst->obsolete || dst->ops->check(dst, cookie)) {
+			*saddr = idst->saddr;
+		} else {
 			tunnel_dst_reset(t);
 			dst_release(dst);
 			dst = NULL;
@@ -367,7 +374,7 @@ static int ip_tunnel_bind_dev(struct net_device *dev)
 
 	if (!IS_ERR(rt)) {
 		tdev = rt->dst.dev;
-		tunnel_dst_set(tunnel, &rt->dst);
+		tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
 		ip_rt_put(rt);
 	}
 	if (dev->type != ARPHRD_ETHER)
@@ -610,7 +617,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 	init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
 			 tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
 
-	rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
+	rt = connected ? tunnel_rtable_get(tunnel, 0, &fl4.saddr) : NULL;
 
 	if (!rt) {
 		rt = ip_route_output_key(tunnel->net, &fl4);
@@ -620,7 +627,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
 			goto tx_error;
 		}
 		if (connected)
-			tunnel_dst_set(tunnel, &rt->dst);
+			tunnel_dst_set(tunnel, &rt->dst, fl4.saddr);
 	}
 
 	if (rt->dst.dev == dev) {
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 9a5e05f27f4f..b40ad897f945 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -218,7 +218,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		 * This is:
 		 *     (actual rate in segments) * baseRTT
 		 */
-		target_cwnd = tp->snd_cwnd * vegas->baseRTT / rtt;
+		target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
+		do_div(target_cwnd, rtt);
 
 		/* Calculate the difference between the window we had,
 		 * and the window we would like to have. This quantity
diff --git a/net/ipv4/tcp_veno.c b/net/ipv4/tcp_veno.c
index 27b9825753d1..8276977d2c85 100644
--- a/net/ipv4/tcp_veno.c
+++ b/net/ipv4/tcp_veno.c
@@ -144,7 +144,7 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 
 		rtt = veno->minrtt;
 
-		target_cwnd = (tp->snd_cwnd * veno->basertt);
+		target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
 		target_cwnd <<= V_PARAM_SHIFT;
 		do_div(target_cwnd, rtt);
 
diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
index 5bf7b61f8ae8..0c56c93619e0 100644
--- a/net/ipv6/sysctl_net_ipv6.c
+++ b/net/ipv6/sysctl_net_ipv6.c
@@ -82,6 +82,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
 	ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
 	ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
 	ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
+	ipv6_table[4].data = &net->ipv6.sysctl.fwmark_reflect;
 
 	ipv6_route_table = ipv6_route_sysctl_init(net);
 	if (!ipv6_route_table)
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 73ba1cc7a88d..6f70bdd3a90a 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -967,8 +967,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	iph->nexthdr = IPPROTO_IPV6;
 	iph->payload_len = old_iph->payload_len;
 	be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
-	iph->priority = old_iph->priority;
 	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
+	ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph));
 	iph->daddr = cp->daddr.in6;
 	iph->saddr = saddr;
 	iph->hop_limit = old_iph->hop_limit;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 93692d692ebc..b8035c2d6667 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -3144,6 +3144,7 @@ static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set,
 	if (set->flags & NFT_SET_MAP)
 		nft_data_uninit(&elem.data, set->dtype);
 
+	return 0;
 err2:
 	nft_data_uninit(&elem.key, desc.type);
 err1:
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index f14bcf23dc9f..3ba31c194cce 100644
--- a/net/netfilter/xt_LED.c
+++ b/net/netfilter/xt_LED.c
@@ -50,11 +50,14 @@ struct xt_led_info_internal {
 	struct timer_list timer;
 };
 
+#define XT_LED_BLINK_DELAY 50 /* ms */
+
 static unsigned int
 led_tg(struct sk_buff *skb, const struct xt_action_param *par)
 {
 	const struct xt_led_info *ledinfo = par->targinfo;
 	struct xt_led_info_internal *ledinternal = ledinfo->internal_data;
+	unsigned long led_delay = XT_LED_BLINK_DELAY;
 
 	/*
 	 * If "always blink" is enabled, and there's still some time until the
@@ -62,9 +65,10 @@ led_tg(struct sk_buff *skb, const struct xt_action_param *par)
 	 */
 	if ((ledinfo->delay > 0) && ledinfo->always_blink &&
 	    timer_pending(&ledinternal->timer))
-		led_trigger_event(&ledinternal->netfilter_led_trigger, LED_OFF);
-
-	led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
+		led_trigger_blink_oneshot(&ledinternal->netfilter_led_trigger,
+					  &led_delay, &led_delay, 1);
+	else
+		led_trigger_event(&ledinternal->netfilter_led_trigger, LED_FULL);
 
 	/* If there's a positive delay, start/update the timer */
 	if (ledinfo->delay > 0) {
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 1eedba5195a3..42dffd428389 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -599,7 +599,7 @@ out:
 	return err;
 no_route:
 	kfree_skb(nskb);
-	IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
+	IP_INC_STATS(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);
 
 	/* FIXME: Returning the 'err' will effect all the associations
 	 * associated with a socket, although only one of the paths of the