commit    5c5095494fb545f53b80cbb7539679a10a3472a6
tree      d7c40cd66a58030ddef369bcb9acd8d95e2ac864
parent    4d586b823acc46c55c889ae1798de236c9d403da
parent    def57687e9579b7a797681990dff763c411f5347
author    David S. Miller <davem@davemloft.net>  2011-05-12 23:01:55 -0400
committer David S. Miller <davem@davemloft.net>  2011-05-12 23:01:55 -0400

    Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-next-2.6
 drivers/net/bonding/bond_main.c   | 157
 drivers/net/tg3.c                 |  69
 drivers/net/usb/cdc_ncm.c         |  13
 drivers/net/vmxnet3/vmxnet3_drv.c |   3
 drivers/net/vmxnet3/vmxnet3_int.h |   4
 include/linux/netdevice.h         |   7
 include/net/garp.h                |   1
 include/net/ip_vs.h               |   9
 net/802/garp.c                    |  14
 net/8021q/vlan_dev.c              |   6
 net/core/dev.c                    |  25
 net/core/ethtool.c                |   2
 net/ipv4/ip_forward.c             |   2
 net/ipv4/ip_options.c             |   4
 net/irda/ircomm/ircomm_tty.c      |  14
 net/l2tp/l2tp_core.c              |  10
 net/netfilter/ipvs/ip_vs_core.c   |  24
 net/netfilter/ipvs/ip_vs_xmit.c   |  97
 net/sctp/socket.c                 |  13
 19 files changed, 283 insertions(+), 191 deletions(-)
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6312db1f7838..088fd845ffdf 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -344,32 +344,6 @@ out:
 }
 
 /**
- * bond_has_challenged_slaves
- * @bond: the bond we're working on
- *
- * Searches the slave list. Returns 1 if a vlan challenged slave
- * was found, 0 otherwise.
- *
- * Assumes bond->lock is held.
- */
-static int bond_has_challenged_slaves(struct bonding *bond)
-{
-	struct slave *slave;
-	int i;
-
-	bond_for_each_slave(bond, slave, i) {
-		if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
-			pr_debug("found VLAN challenged slave - %s\n",
-				 slave->dev->name);
-			return 1;
-		}
-	}
-
-	pr_debug("no VLAN challenged slaves found\n");
-	return 0;
-}
-
 /**
  * bond_next_vlan - safely skip to the next item in the vlans list.
  * @bond: the bond we're working on
  * @curr: item we're advancing from
@@ -1406,52 +1380,68 @@ static int bond_sethwaddr(struct net_device *bond_dev,
 	return 0;
 }
 
-#define BOND_VLAN_FEATURES \
-	(NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \
-	 NETIF_F_HW_VLAN_FILTER)
-
-/*
- * Compute the common dev->feature set available to all slaves. Some
- * feature bits are managed elsewhere, so preserve those feature bits
- * on the master device.
- */
-static int bond_compute_features(struct bonding *bond)
+static u32 bond_fix_features(struct net_device *dev, u32 features)
 {
 	struct slave *slave;
-	struct net_device *bond_dev = bond->dev;
-	u32 features = bond_dev->features;
-	u32 vlan_features = 0;
-	unsigned short max_hard_header_len = max((u16)ETH_HLEN,
-						 bond_dev->hard_header_len);
+	struct bonding *bond = netdev_priv(dev);
+	u32 mask;
 	int i;
 
-	features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES);
-	features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_NOCACHE_COPY;
+	read_lock(&bond->lock);
 
-	if (!bond->first_slave)
-		goto done;
+	if (!bond->first_slave) {
+		/* Disable adding VLANs to empty bond. But why? --mq */
+		features |= NETIF_F_VLAN_CHALLENGED;
+		goto out;
+	}
 
+	mask = features;
 	features &= ~NETIF_F_ONE_FOR_ALL;
+	features |= NETIF_F_ALL_FOR_ALL;
 
-	vlan_features = bond->first_slave->dev->vlan_features;
 	bond_for_each_slave(bond, slave, i) {
 		features = netdev_increment_features(features,
 						     slave->dev->features,
-						     NETIF_F_ONE_FOR_ALL);
+						     mask);
+	}
+
+out:
+	read_unlock(&bond->lock);
+	return features;
+}
+
+#define BOND_VLAN_FEATURES	(NETIF_F_ALL_TX_OFFLOADS | \
+				 NETIF_F_SOFT_FEATURES | \
+				 NETIF_F_LRO)
+
+static void bond_compute_features(struct bonding *bond)
+{
+	struct slave *slave;
+	struct net_device *bond_dev = bond->dev;
+	u32 vlan_features = BOND_VLAN_FEATURES;
+	unsigned short max_hard_header_len = ETH_HLEN;
+	int i;
+
+	read_lock(&bond->lock);
+
+	if (!bond->first_slave)
+		goto done;
+
+	bond_for_each_slave(bond, slave, i) {
 		vlan_features = netdev_increment_features(vlan_features,
-			slave->dev->vlan_features,
-			NETIF_F_ONE_FOR_ALL);
+			slave->dev->vlan_features, BOND_VLAN_FEATURES);
+
 		if (slave->dev->hard_header_len > max_hard_header_len)
 			max_hard_header_len = slave->dev->hard_header_len;
 	}
 
 done:
-	features |= (bond_dev->features & BOND_VLAN_FEATURES);
-	bond_dev->features = netdev_fix_features(bond_dev, features);
-	bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
+	bond_dev->vlan_features = vlan_features;
 	bond_dev->hard_header_len = max_hard_header_len;
 
-	return 0;
+	read_unlock(&bond->lock);
+
+	netdev_change_features(bond_dev);
 }
 
 static void bond_setup_by_slave(struct net_device *bond_dev,
@@ -1544,7 +1534,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	struct netdev_hw_addr *ha;
 	struct sockaddr addr;
 	int link_reporting;
-	int old_features = bond_dev->features;
 	int res = 0;
 
 	if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
@@ -1577,16 +1566,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 			pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
 				   bond_dev->name, slave_dev->name,
 				   slave_dev->name, bond_dev->name);
-			bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
 		}
 	} else {
 		pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
-		if (bond->slave_cnt == 0) {
-			/* First slave, and it is not VLAN challenged,
-			 * so remove the block of adding VLANs over the bond.
-			 */
-			bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
-		}
 	}
 
 	/*
@@ -1775,10 +1757,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	new_slave->delay = 0;
 	new_slave->link_failure_count = 0;
 
-	bond_compute_features(bond);
-
 	write_unlock_bh(&bond->lock);
 
+	bond_compute_features(bond);
+
 	read_lock(&bond->lock);
 
 	new_slave->last_arp_rx = jiffies;
@@ -1958,7 +1940,7 @@ err_free:
 	kfree(new_slave);
 
 err_undo_flags:
-	bond_dev->features = old_features;
+	bond_compute_features(bond);
 
 	return res;
 }
@@ -1979,6 +1961,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave, *oldcurrent;
 	struct sockaddr addr;
+	u32 old_features = bond_dev->features;
 
 	/* slave is not a slave or master is not master of this slave */
 	if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -2039,8 +2022,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	/* release the slave from its bond */
 	bond_detach_slave(bond, slave);
 
-	bond_compute_features(bond);
-
 	if (bond->primary_slave == slave)
 		bond->primary_slave = NULL;
 
@@ -2084,24 +2065,23 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 		 */
 		memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-		if (!bond->vlgrp) {
-			bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
-		} else {
+		if (bond->vlgrp) {
 			pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
 				   bond_dev->name, bond_dev->name);
 			pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
 				   bond_dev->name);
 		}
-	} else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
-		   !bond_has_challenged_slaves(bond)) {
-		pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
-			bond_dev->name, slave_dev->name, bond_dev->name);
-		bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
 	}
 
 	write_unlock_bh(&bond->lock);
 	unblock_netpoll_tx();
 
+	bond_compute_features(bond);
+	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
+	    (old_features & NETIF_F_VLAN_CHALLENGED))
+		pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+			bond_dev->name, slave_dev->name, bond_dev->name);
+
 	/* must do this from outside any spinlocks */
 	bond_destroy_slave_symlinks(bond_dev, slave_dev);
 
@@ -2219,8 +2199,6 @@ static int bond_release_all(struct net_device *bond_dev)
 			bond_alb_deinit_slave(bond, slave);
 		}
 
-		bond_compute_features(bond);
-
 		bond_destroy_slave_symlinks(bond_dev, slave_dev);
 		bond_del_vlans_from_slave(bond, slave_dev);
 
@@ -2269,9 +2247,7 @@ static int bond_release_all(struct net_device *bond_dev)
 		 */
 		memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-		if (!bond->vlgrp) {
-			bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
-		} else {
+		if (bond->vlgrp) {
 			pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
 				   bond_dev->name, bond_dev->name);
 			pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2282,6 +2258,9 @@ static int bond_release_all(struct net_device *bond_dev)
 
 out:
 	write_unlock_bh(&bond->lock);
+
+	bond_compute_features(bond);
+
 	return 0;
 }
 
@@ -4337,11 +4316,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
 static const struct ethtool_ops bond_ethtool_ops = {
 	.get_drvinfo		= bond_ethtool_get_drvinfo,
 	.get_link		= ethtool_op_get_link,
-	.get_tx_csum		= ethtool_op_get_tx_csum,
-	.get_sg			= ethtool_op_get_sg,
-	.get_tso		= ethtool_op_get_tso,
-	.get_ufo		= ethtool_op_get_ufo,
-	.get_flags		= ethtool_op_get_flags,
 };
 
 static const struct net_device_ops bond_netdev_ops = {
@@ -4367,6 +4341,7 @@ static const struct net_device_ops bond_netdev_ops = {
 #endif
 	.ndo_add_slave		= bond_enslave,
 	.ndo_del_slave		= bond_release,
+	.ndo_fix_features	= bond_fix_features,
 };
 
 static void bond_destructor(struct net_device *bond_dev)
@@ -4422,14 +4397,14 @@ static void bond_setup(struct net_device *bond_dev)
 	 * when there are slaves that are not hw accel
 	 * capable
 	 */
-	bond_dev->features |= (NETIF_F_HW_VLAN_TX |
-			       NETIF_F_HW_VLAN_RX |
-			       NETIF_F_HW_VLAN_FILTER);
 
-	/* By default, we enable GRO on bonding devices.
-	 * Actual support requires lowlevel drivers are GRO ready.
-	 */
-	bond_dev->features |= NETIF_F_GRO;
+	bond_dev->hw_features = BOND_VLAN_FEATURES |
+				NETIF_F_HW_VLAN_TX |
+				NETIF_F_HW_VLAN_RX |
+				NETIF_F_HW_VLAN_FILTER;
+
+	bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+	bond_dev->features |= bond_dev->hw_features;
 }
 
 static void bond_work_cancel_all(struct bonding *bond)
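
Annotation: the bonding changes above move the driver to the ndo_fix_features/ndo_set_features model. As a reading aid, here is a minimal sketch of that contract for a hypothetical driver (the foo_* names and the example policy are illustrative, not taken from this merge):

	#include <linux/netdevice.h>

	/* Called under rtnl; trim the requested feature set to what is valid. */
	static u32 foo_fix_features(struct net_device *dev, u32 features)
	{
		if (!(features & NETIF_F_SG))
			features &= ~NETIF_F_ALL_TSO;	/* TSO requires SG */
		return features;
	}

	/* Called when the sanitized set differs from dev->features. */
	static int foo_set_features(struct net_device *dev, u32 features)
	{
		u32 changed = dev->features ^ features;	/* bits that flipped */

		if (changed & NETIF_F_RXCSUM) {
			/* reprogram hardware RX checksumming here */
		}
		return 0;
	}

	static const struct net_device_ops foo_netdev_ops = {
		.ndo_fix_features	= foo_fix_features,
		.ndo_set_features	= foo_set_features,
	};

bond_fix_features() above is the same idea, except that the valid set is computed by folding the slaves' feature words together.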
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ec1953043102..d5a1f9e3794c 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3373,8 +3373,8 @@ relink:
 		tg3_phy_copper_begin(tp);
 
 		tg3_readphy(tp, MII_BMSR, &bmsr);
-		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
-		    (bmsr & BMSR_LSTATUS))
+		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
+		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
 			current_link_up = 1;
 	}
 
@@ -6309,6 +6309,42 @@ dma_error:
 	return NETDEV_TX_OK;
 }
 
+static void tg3_set_loopback(struct net_device *dev, u32 features)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (features & NETIF_F_LOOPBACK) {
+		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
+			return;
+
+		/*
+		 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
+		 * loopback mode if Half-Duplex mode was negotiated earlier.
+		 */
+		tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+
+		/* Enable internal MAC loopback mode */
+		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+		spin_lock_bh(&tp->lock);
+		tw32(MAC_MODE, tp->mac_mode);
+		netif_carrier_on(tp->dev);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
+	} else {
+		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+			return;
+
+		/* Disable internal MAC loopback mode */
+		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
+		spin_lock_bh(&tp->lock);
+		tw32(MAC_MODE, tp->mac_mode);
+		/* Force link status check */
+		tg3_setup_phy(tp, 1);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
+	}
+}
+
 static u32 tg3_fix_features(struct net_device *dev, u32 features)
 {
 	struct tg3 *tp = netdev_priv(dev);
@@ -6319,6 +6355,16 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
 	return features;
 }
 
+static int tg3_set_features(struct net_device *dev, u32 features)
+{
+	u32 changed = dev->features ^ features;
+
+	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
+		tg3_set_loopback(dev, features);
+
+	return 0;
+}
+
 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
 			       int new_mtu)
 {
@@ -9485,6 +9531,13 @@ static int tg3_open(struct net_device *dev)
 
 	netif_tx_start_all_queues(dev);
 
+	/*
+	 * Reset loopback feature if it was turned on while the device was down
+	 * make sure that it's installed properly now.
+	 */
+	if (dev->features & NETIF_F_LOOPBACK)
+		tg3_set_loopback(dev, dev->features);
+
 	return 0;
 
 err_out3:
@@ -15033,6 +15086,7 @@ static const struct net_device_ops tg3_netdev_ops = {
 	.ndo_tx_timeout		= tg3_tx_timeout,
 	.ndo_change_mtu		= tg3_change_mtu,
 	.ndo_fix_features	= tg3_fix_features,
+	.ndo_set_features	= tg3_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tg3_poll_controller,
 #endif
@@ -15049,6 +15103,7 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
 	.ndo_do_ioctl		= tg3_ioctl,
 	.ndo_tx_timeout		= tg3_tx_timeout,
 	.ndo_change_mtu		= tg3_change_mtu,
+	.ndo_set_features	= tg3_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= tg3_poll_controller,
 #endif
@@ -15246,6 +15301,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	dev->features |= hw_features;
 	dev->vlan_features |= hw_features;
 
+	/*
+	 * Add loopback capability only for a subset of devices that support
+	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
+	 * loopback for the remaining devices.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+	    !tg3_flag(tp, CPMU_PRESENT))
+		/* Add the loopback capability */
+		dev->hw_features |= NETIF_F_LOOPBACK;
+
 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
 	    !tg3_flag(tp, TSO_CAPABLE) &&
 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1033ef6476a4..4ab557d0287d 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,13 +54,13 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define	DRIVER_VERSION				"23-Apr-2011"
+#define	DRIVER_VERSION				"06-May-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN		0x10
 
 /* Maximum NTB length */
-#define	CDC_NCM_NTB_MAX_SIZE_TX			(16384 + 4) /* bytes, must be short terminated */
+#define	CDC_NCM_NTB_MAX_SIZE_TX			16384	/* bytes */
 #define	CDC_NCM_NTB_MAX_SIZE_RX			16384	/* bytes */
 
 /* Minimum value for MaxDatagramSize, ch. 6.2.9 */
@@ -722,7 +722,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 
 	} else {
 		/* reset variables */
-		skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
+		skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
 		if (skb_out == NULL) {
 			if (skb != NULL) {
 				dev_kfree_skb_any(skb);
@@ -861,8 +861,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 	/* store last offset */
 	last_offset = offset;
 
-	if ((last_offset < ctx->tx_max) && ((last_offset %
-			le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
+	if (((last_offset < ctx->tx_max) && ((last_offset %
+			le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) ||
+	    (((last_offset == ctx->tx_max) && ((ctx->tx_max %
+			le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) &&
+	     (ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)))) {
 		/* force short packet */
 		*(((u8 *)skb_out->data) + last_offset) = 0;
 		last_offset++;
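
Annotation, with illustrative numbers not taken from the patch: assume wMaxPacketSize = 512 and tx_max = 16384. An NTB that ends at offset 1024 lands exactly on a USB packet boundary, so the driver appends one zero byte to force a short packet — which is why skb_out is now allocated with tx_max + 1 bytes. The second clause extends this to an NTB that fills tx_max exactly: it too gets the pad byte, but only while tx_max is still below the device's dwNtbOutMaxSize; once the NTB is at the negotiated maximum, the transfer length is unambiguous and needs no termination.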
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index c0da23096160..fa6e2ac7475a 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2884,6 +2884,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	int num_tx_queues;
 	int num_rx_queues;
 
+	if (!pci_msi_enabled())
+		enable_mq = 0;
+
 #ifdef VMXNET3_RSS
 	if (enable_mq)
 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 8ba7b5f67de2..f50d36fdf405 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.0.25.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.1.9.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01001900
+#define VMXNET3_DRIVER_VERSION_NUM      0x01010900
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e7244ed1f9a8..1d9696a9ee4d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1097,10 +1097,14 @@ struct net_device {
 
 #define NETIF_F_ALL_TSO 	(NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
 
+#define NETIF_F_ALL_FCOE	(NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \
+				 NETIF_F_FSO)
+
 #define NETIF_F_ALL_TX_OFFLOADS	(NETIF_F_ALL_CSUM | NETIF_F_SG | \
 				 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
 				 NETIF_F_HIGHDMA | \
-				 NETIF_F_SCTP_CSUM | NETIF_F_FCOE_CRC)
+				 NETIF_F_SCTP_CSUM | \
+				 NETIF_F_ALL_FCOE)
 
 	/*
 	 * If one device supports one of these features, then enable them
@@ -2561,6 +2565,7 @@ u32 netdev_increment_features(u32 all, u32 one, u32 mask);
 u32 netdev_fix_features(struct net_device *dev, u32 features);
 int __netdev_update_features(struct net_device *dev);
 void netdev_update_features(struct net_device *dev);
+void netdev_change_features(struct net_device *dev);
 
 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
 				      struct net_device *dev);
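
Annotation: the ONE_FOR_ALL/ALL_FOR_ALL distinction referenced in this header is what bond_fix_features() leans on. A sketch of how a stacked driver folds its lower devices' feature words together, assuming the 2.6.39-era u32 feature words (the helper below is hypothetical):

	#include <linux/netdevice.h>

	/* Compute a master feature set from n lower devices: bits in
	 * NETIF_F_ONE_FOR_ALL survive if any lower device has them,
	 * bits in NETIF_F_ALL_FOR_ALL only if every one does. */
	static u32 example_fold_features(struct net_device *lower[], int n)
	{
		u32 all = lower[0]->features;
		int i;

		for (i = 1; i < n; i++)
			all = netdev_increment_features(all,
							lower[i]->features,
							NETIF_F_ONE_FOR_ALL);
		return all;
	}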
diff --git a/include/net/garp.h b/include/net/garp.h
index 8cabbf087169..834d8add9e5f 100644
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -104,6 +104,7 @@ struct garp_applicant {
 	struct sk_buff_head	queue;
 	struct sk_buff		*pdu;
 	struct rb_root		gid;
+	struct rcu_head		rcu;
 };
 
 struct garp_port {
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 9d1f510ab6d0..4fff432aeade 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -665,9 +665,7 @@ struct ip_vs_dest {
 	struct dst_entry	*dst_cache;	/* destination cache entry */
 	u32			dst_rtos;	/* RT_TOS(tos) for dst */
 	u32			dst_cookie;
-#ifdef CONFIG_IP_VS_IPV6
-	struct in6_addr		dst_saddr;
-#endif
+	union nf_inet_addr	dst_saddr;
 
 	/* for virtual service */
 	struct ip_vs_service	*svc;		/* service it belongs to */
@@ -1253,7 +1251,8 @@ extern int ip_vs_tunnel_xmit
 extern int ip_vs_dr_xmit
 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 extern int ip_vs_icmp_xmit
-(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp, int offset);
+(struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
+ int offset, unsigned int hooknum);
 extern void ip_vs_dst_reset(struct ip_vs_dest *dest);
 
 #ifdef CONFIG_IP_VS_IPV6
@@ -1267,7 +1266,7 @@ extern int ip_vs_dr_xmit_v6
 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp);
 extern int ip_vs_icmp_xmit_v6
 (struct sk_buff *skb, struct ip_vs_conn *cp, struct ip_vs_protocol *pp,
- int offset);
+ int offset, unsigned int hooknum);
 #endif
 
 #ifdef CONFIG_SYSCTL
diff --git a/net/802/garp.c b/net/802/garp.c
index 5dbe8967bbd5..f8300a8b5fbc 100644
--- a/net/802/garp.c
+++ b/net/802/garp.c
@@ -603,6 +603,11 @@ err1:
 }
 EXPORT_SYMBOL_GPL(garp_init_applicant);
 
+static void garp_app_kfree_rcu(struct rcu_head *head)
+{
+	kfree(container_of(head, struct garp_applicant, rcu));
+}
+
 void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
 {
 	struct garp_port *port = rtnl_dereference(dev->garp_port);
@@ -611,7 +616,6 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 	ASSERT_RTNL();
 
 	rcu_assign_pointer(port->applicants[appl->type], NULL);
-	synchronize_rcu();
 
 	/* Delete timer and generate a final TRANSMIT_PDU event to flush out
 	 * all pending messages before the applicant is gone. */
@@ -621,7 +625,7 @@ void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl
 	garp_queue_xmit(app);
 
 	dev_mc_del(dev, appl->proto.group_address);
-	kfree(app);
+	call_rcu(&app->rcu, garp_app_kfree_rcu);
 	garp_release_port(dev);
 }
 EXPORT_SYMBOL_GPL(garp_uninit_applicant);
@@ -639,3 +643,9 @@ void garp_unregister_application(struct garp_application *appl)
 	stp_proto_unregister(&appl->proto);
 }
 EXPORT_SYMBOL_GPL(garp_unregister_application);
+
+static void __exit garp_cleanup_module(void)
+{
+	rcu_barrier(); /* Wait for completion of call_rcu()'s */
+}
+module_exit(garp_cleanup_module);
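
Annotation: the synchronize_rcu() → call_rcu() conversion above is the standard way to take a blocking grace-period wait off the unregister path; the rcu_barrier() in the new module_exit handler then ensures no callback is still queued when the module text goes away. The generic shape, with hypothetical names:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		/* ...state visible to RCU readers... */
		struct rcu_head rcu;		/* embedded, like garp_applicant.rcu */
	};

	static void foo_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct foo, rcu));
	}

	static void foo_unpublish(struct foo *f)
	{
		/* unlink f from every RCU-visible structure first, then: */
		call_rcu(&f->rcu, foo_free_rcu);	/* freed after a grace period */
	}

	static void __exit foo_exit(void)
	{
		rcu_barrier();	/* wait for outstanding foo_free_rcu() calls */
	}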
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 58f8010e1aef..f247f5bff88d 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -528,7 +528,7 @@ static int vlan_dev_init(struct net_device *dev)
 			  (1<<__LINK_STATE_DORMANT))) |
 		   (1<<__LINK_STATE_PRESENT);
 
-	dev->hw_features = real_dev->vlan_features & NETIF_F_ALL_TX_OFFLOADS;
+	dev->hw_features = NETIF_F_ALL_TX_OFFLOADS;
 	dev->features |= real_dev->vlan_features | NETIF_F_LLTX;
 	dev->gso_max_size = real_dev->gso_max_size;
 
@@ -587,9 +587,11 @@ static u32 vlan_dev_fix_features(struct net_device *dev, u32 features)
 {
 	struct net_device *real_dev = vlan_dev_info(dev)->real_dev;
 
-	features &= (real_dev->features | NETIF_F_LLTX);
+	features &= real_dev->features;
+	features &= real_dev->vlan_features;
 	if (dev_ethtool_get_rx_csum(real_dev))
 		features |= NETIF_F_RXCSUM;
+	features |= NETIF_F_LLTX;
 
 	return features;
 }
diff --git a/net/core/dev.c b/net/core/dev.c
index 75898a32c038..ea23353e6251 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5289,6 +5289,14 @@ int __netdev_update_features(struct net_device *dev)
 	return 1;
 }
 
+/**
+ *	netdev_update_features - recalculate device features
+ *	@dev: the device to check
+ *
+ *	Recalculate dev->features set and send notifications if it
+ *	has changed. Should be called after driver or hardware dependent
+ *	conditions might have changed that influence the features.
+ */
 void netdev_update_features(struct net_device *dev)
 {
 	if (__netdev_update_features(dev))
@@ -5297,6 +5305,23 @@ void netdev_update_features(struct net_device *dev)
 EXPORT_SYMBOL(netdev_update_features);
 
 /**
+ *	netdev_change_features - recalculate device features
+ *	@dev: the device to check
+ *
+ *	Recalculate dev->features set and send notifications even
+ *	if they have not changed. Should be called instead of
+ *	netdev_update_features() if also dev->vlan_features might
+ *	have changed to allow the changes to be propagated to stacked
+ *	VLAN devices.
+ */
+void netdev_change_features(struct net_device *dev)
+{
+	__netdev_update_features(dev);
+	netdev_features_change(dev);
+}
+EXPORT_SYMBOL(netdev_change_features);
+
+/**
  *	netif_stacked_transfer_operstate - transfer operstate
  *	@rootdev: the root or lower level device to transfer state from
  *	@dev: the device to transfer operstate to
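
Annotation: both helpers funnel into __netdev_update_features(); a simplified paraphrase of its flow (not the verbatim function) shows where ndo_fix_features and ndo_set_features plug in:

	static int sketch_update_features(struct net_device *dev)
	{
		u32 features = netdev_get_wanted_features(dev);

		if (dev->netdev_ops->ndo_fix_features)
			features = dev->netdev_ops->ndo_fix_features(dev, features);
		features = netdev_fix_features(dev, features);	/* generic fixups */

		if (dev->features == features)
			return 0;			/* nothing changed */
		if (dev->netdev_ops->ndo_set_features)
			dev->netdev_ops->ndo_set_features(dev, features);
		dev->features = features;
		return 1;				/* caller sends notifications */
	}

netdev_change_features() skips the did-it-change shortcut on the notification side, which is what lets a bonding master push recomputed vlan_features down to stacked VLAN devices even when its own dev->features word is unchanged.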
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index b6f405888538..b8c2b10f397a 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -361,7 +361,7 @@ static const char netdev_features_strings[ETHTOOL_DEV_FEATURE_WORDS * 32][ETH_GS
 	/* NETIF_F_NTUPLE */         "rx-ntuple-filter",
 	/* NETIF_F_RXHASH */         "rx-hashing",
 	/* NETIF_F_RXCSUM */         "rx-checksum",
-	/* NETIF_F_NOCACHE_COPY */   "tx-nocache-copy"
+	/* NETIF_F_NOCACHE_COPY */   "tx-nocache-copy",
 	/* NETIF_F_LOOPBACK */       "loopback",
 };
 
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 99461f09320f..fcbc0c8f1261 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -84,7 +84,7 @@ int ip_forward(struct sk_buff *skb)
 
 	rt = skb_rtable(skb);
 
-	if (opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
+	if (opt->is_strictroute && iph->daddr != rt->rt_gateway)
 		goto sr_failed;
 
 	if (unlikely(skb->len > dst_mtu(&rt->dst) && !skb_is_gso(skb) &&
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index 01fc40965848..c5c26192b057 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -601,7 +601,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 	unsigned long orefdst;
 	int err;
 
-	if (!opt->srr || !rt)
+	if (!rt)
 		return 0;
 
 	if (skb->pkt_type != PACKET_HOST)
@@ -635,7 +635,7 @@ int ip_options_rcv_srr(struct sk_buff *skb)
 		if (rt2->rt_type != RTN_LOCAL)
 			break;
 		/* Superfast 8) loopback forward */
-		memcpy(&iph->daddr, &optptr[srrptr-1], 4);
+		iph->daddr = nexthop;
 		opt->is_changed = 1;
 	}
 	if (srrptr <= srrspace) {
diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
index a39cca8331df..b3cc8b3989a9 100644
--- a/net/irda/ircomm/ircomm_tty.c
+++ b/net/irda/ircomm/ircomm_tty.c
@@ -38,6 +38,7 @@
 #include <linux/seq_file.h>
 #include <linux/termios.h>
 #include <linux/tty.h>
+#include <linux/tty_flip.h>
 #include <linux/interrupt.h>
 #include <linux/device.h>		/* for MODULE_ALIAS_CHARDEV_MAJOR */
 
@@ -1132,7 +1133,6 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
 				      struct sk_buff *skb)
 {
 	struct ircomm_tty_cb *self = (struct ircomm_tty_cb *) instance;
-	struct tty_ldisc *ld;
 
 	IRDA_DEBUG(2, "%s()\n", __func__ );
 
@@ -1161,15 +1161,11 @@ static int ircomm_tty_data_indication(void *instance, void *sap,
 	}
 
 	/*
-	 * Just give it over to the line discipline. There is no need to
-	 * involve the flip buffers, since we are not running in an interrupt
-	 * handler
+	 * Use flip buffer functions since the code may be called from interrupt
+	 * context
 	 */
-
-	ld = tty_ldisc_ref(self->tty);
-	if (ld)
-		ld->ops->receive_buf(self->tty, skb->data, NULL, skb->len);
-	tty_ldisc_deref(ld);
+	tty_insert_flip_string(self->tty, skb->data, skb->len);
+	tty_flip_buffer_push(self->tty);
 
 	/* No need to kfree_skb - see ircomm_ttp_data_indication() */
 
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 9be095e00450..ed8a2335442f 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -1435,16 +1435,15 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32
 
 	/* Add tunnel to our list */
 	INIT_LIST_HEAD(&tunnel->list);
-	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
-	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
-	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
-	synchronize_rcu();
 	atomic_inc(&l2tp_tunnel_count);
 
 	/* Bump the reference count. The tunnel context is deleted
-	 * only when this drops to zero.
+	 * only when this drops to zero. Must be done before list insertion
 	 */
 	l2tp_tunnel_inc_refcount(tunnel);
+	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
+	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
+	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);
 
 	err = 0;
 err:
@@ -1636,7 +1635,6 @@ struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunn
 		hlist_add_head_rcu(&session->global_hlist,
 				   l2tp_session_id_hash_2(pn, session_id));
 		spin_unlock_bh(&pn->l2tp_session_hlist_lock);
-		synchronize_rcu();
 	}
 
 	/* Ignore management session in session count value */
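
Annotation: the reordering above enforces the usual RCU publish rule — pin the object before making it reachable, so a reader that finds the tunnel on the list can never see it with a zero reference count. Schematically (reader modeled loosely on l2tp_tunnel_find(); details elided):

	/* writer */
	l2tp_tunnel_inc_refcount(tunnel);		/* pin first...    */
	spin_lock_bh(&pn->l2tp_tunnel_list_lock);
	list_add_rcu(&tunnel->list, &pn->l2tp_tunnel_list);
	spin_unlock_bh(&pn->l2tp_tunnel_list_lock);	/* ...then publish */

	/* reader */
	rcu_read_lock();
	list_for_each_entry_rcu(tunnel, &pn->l2tp_tunnel_list, list)
		if (tunnel->tunnel_id == tunnel_id) {
			l2tp_tunnel_inc_refcount(tunnel);
			break;
		}
	rcu_read_unlock();

With that ordering in place, the removed synchronize_rcu() calls were not buying any extra safety.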
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index a74dae6c5dbc..bfa808f4da13 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1382,15 +1382,7 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)
 	ip_vs_in_stats(cp, skb);
 	if (IPPROTO_TCP == cih->protocol || IPPROTO_UDP == cih->protocol)
 		offset += 2 * sizeof(__u16);
-	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset);
-	/* LOCALNODE from FORWARD hook is not supported */
-	if (verdict == NF_ACCEPT && hooknum == NF_INET_FORWARD &&
-	    skb_rtable(skb)->rt_flags & RTCF_LOCAL) {
-		IP_VS_DBG(1, "%s(): "
-			  "local delivery to %pI4 but in FORWARD\n",
-			  __func__, &skb_rtable(skb)->rt_dst);
-		verdict = NF_DROP;
-	}
+	verdict = ip_vs_icmp_xmit(skb, cp, pp, offset, hooknum);
 
   out:
 	__ip_vs_conn_put(cp);
@@ -1412,7 +1404,6 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	unsigned int offset, verdict;
-	struct rt6_info *rt;
 
 	*related = 1;
 
@@ -1474,23 +1465,12 @@ ip_vs_in_icmp_v6(struct sk_buff *skb, int *related, unsigned int hooknum)
 	if (!cp)
 		return NF_ACCEPT;
 
-	verdict = NF_DROP;
-
 	/* do the statistics and put it back */
 	ip_vs_in_stats(cp, skb);
 	if (IPPROTO_TCP == cih->nexthdr || IPPROTO_UDP == cih->nexthdr ||
 	    IPPROTO_SCTP == cih->nexthdr)
 		offset += 2 * sizeof(__u16);
-	verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset);
-	/* LOCALNODE from FORWARD hook is not supported */
-	if (verdict == NF_ACCEPT && hooknum == NF_INET_FORWARD &&
-	    (rt = (struct rt6_info *) skb_dst(skb)) &&
-	    rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK) {
-		IP_VS_DBG(1, "%s(): "
-			  "local delivery to %pI6 but in FORWARD\n",
-			  __func__, &rt->rt6i_dst);
-		verdict = NF_DROP;
-	}
+	verdict = ip_vs_icmp_xmit_v6(skb, cp, pp, offset, hooknum);
 
 	__ip_vs_conn_put(cp);
 
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 6132b213eddc..ee319a4338b0 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -87,7 +87,7 @@ __ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
 /* Get route to destination or remote server */
 static struct rtable *
 __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
-		   __be32 daddr, u32 rtos, int rt_mode)
+		   __be32 daddr, u32 rtos, int rt_mode, __be32 *ret_saddr)
 {
 	struct net *net = dev_net(skb_dst(skb)->dev);
 	struct rtable *rt;			/* Route to the other host */
@@ -98,7 +98,12 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
 		spin_lock(&dest->dst_lock);
 		if (!(rt = (struct rtable *)
 		      __ip_vs_dst_check(dest, rtos))) {
-			rt = ip_route_output(net, dest->addr.ip, 0, rtos, 0);
+			struct flowi4 fl4;
+
+			memset(&fl4, 0, sizeof(fl4));
+			fl4.daddr = dest->addr.ip;
+			fl4.flowi4_tos = rtos;
+			rt = ip_route_output_key(net, &fl4);
 			if (IS_ERR(rt)) {
 				spin_unlock(&dest->dst_lock);
 				IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
@@ -106,18 +111,30 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
 				return NULL;
 			}
 			__ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
-			IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
-				  &dest->addr.ip,
+			dest->dst_saddr.ip = fl4.saddr;
+			IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d, "
+				  "rtos=%X\n",
+				  &dest->addr.ip, &dest->dst_saddr.ip,
 				  atomic_read(&rt->dst.__refcnt), rtos);
 		}
+		daddr = dest->addr.ip;
+		if (ret_saddr)
+			*ret_saddr = dest->dst_saddr.ip;
 		spin_unlock(&dest->dst_lock);
 	} else {
-		rt = ip_route_output(net, daddr, 0, rtos, 0);
+		struct flowi4 fl4;
+
+		memset(&fl4, 0, sizeof(fl4));
+		fl4.daddr = daddr;
+		fl4.flowi4_tos = rtos;
+		rt = ip_route_output_key(net, &fl4);
 		if (IS_ERR(rt)) {
 			IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
 				     &daddr);
 			return NULL;
 		}
+		if (ret_saddr)
+			*ret_saddr = fl4.saddr;
 	}
 
 	local = rt->rt_flags & RTCF_LOCAL;
@@ -125,7 +142,7 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
 	      rt_mode)) {
 		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
 			     (rt->rt_flags & RTCF_LOCAL) ?
-			     "local":"non-local", &rt->rt_dst);
+			     "local":"non-local", &daddr);
 		ip_rt_put(rt);
 		return NULL;
 	}
@@ -133,14 +150,14 @@ __ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
 	    !((ort = skb_rtable(skb)) && ort->rt_flags & RTCF_LOCAL)) {
 		IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
 			     "requires NAT method, dest: %pI4\n",
-			     &ip_hdr(skb)->daddr, &rt->rt_dst);
+			     &ip_hdr(skb)->daddr, &daddr);
 		ip_rt_put(rt);
 		return NULL;
 	}
 	if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
 		IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
 			     "to non-local address, dest: %pI4\n",
-			     &ip_hdr(skb)->saddr, &rt->rt_dst);
+			     &ip_hdr(skb)->saddr, &daddr);
 		ip_rt_put(rt);
 		return NULL;
 	}
@@ -229,8 +246,6 @@ out_err:
 
 /*
  * Get route to destination or remote server
- * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
- *	    &4=Allow redirect from remote daddr to local
  */
 static struct rt6_info *
 __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
@@ -250,7 +265,7 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
 		u32 cookie;
 
 		dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
-					      &dest->dst_saddr,
+					      &dest->dst_saddr.in6,
 					      do_xfrm);
 		if (!dst) {
 			spin_unlock(&dest->dst_lock);
@@ -260,11 +275,11 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
 		cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
 		__ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie);
 		IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
-			  &dest->addr.in6, &dest->dst_saddr,
+			  &dest->addr.in6, &dest->dst_saddr.in6,
 			  atomic_read(&rt->dst.__refcnt));
 		}
 		if (ret_saddr)
-			ipv6_addr_copy(ret_saddr, &dest->dst_saddr);
+			ipv6_addr_copy(ret_saddr, &dest->dst_saddr.in6);
 		spin_unlock(&dest->dst_lock);
 	} else {
 		dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
@@ -274,13 +289,14 @@ __ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
 	}
 
 	local = __ip_vs_is_local_route6(rt);
-	if (!((local ? 1 : 2) & rt_mode)) {
+	if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
+	      rt_mode)) {
 		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n",
 			     local ? "local":"non-local", daddr);
 		dst_release(&rt->dst);
 		return NULL;
 	}
-	if (local && !(rt_mode & 4) &&
+	if (local && !(rt_mode & IP_VS_RT_MODE_RDR) &&
 	    !((ort = (struct rt6_info *) skb_dst(skb)) &&
 	      __ip_vs_is_local_route6(ort))) {
 		IP_VS_DBG_RL("Redirect from non-local address %pI6 to local "
@@ -386,7 +402,7 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	EnterFunction(10);
 
 	if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr, RT_TOS(iph->tos),
-				      IP_VS_RT_MODE_NON_LOCAL)))
+				      IP_VS_RT_MODE_NON_LOCAL, NULL)))
 		goto tx_error_icmp;
 
 	/* MTU checking */
@@ -440,7 +456,8 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	EnterFunction(10);
 
-	if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0, 2)))
+	if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0,
+					 IP_VS_RT_MODE_NON_LOCAL)))
 		goto tx_error_icmp;
 
 	/* MTU checking */
@@ -517,7 +534,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 				      RT_TOS(iph->tos),
 				      IP_VS_RT_MODE_LOCAL |
 				      IP_VS_RT_MODE_NON_LOCAL |
-				      IP_VS_RT_MODE_RDR)))
+				      IP_VS_RT_MODE_RDR, NULL)))
 		goto tx_error_icmp;
 	local = rt->rt_flags & RTCF_LOCAL;
 	/*
@@ -539,7 +556,7 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 #endif
 
 	/* From world but DNAT to loopback address? */
-	if (local && ipv4_is_loopback(rt->rt_dst) &&
+	if (local && ipv4_is_loopback(cp->daddr.ip) &&
 	    rt_is_input_route(skb_rtable(skb))) {
 		IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
 				 "stopping DNAT to loopback address");
@@ -632,7 +649,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	}
 
 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
-					 0, 1|2|4)))
+					 0, (IP_VS_RT_MODE_LOCAL |
+					     IP_VS_RT_MODE_NON_LOCAL |
+					     IP_VS_RT_MODE_RDR))))
 		goto tx_error_icmp;
 	local = __ip_vs_is_local_route6(rt);
 	/*
@@ -748,6 +767,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 		   struct ip_vs_protocol *pp)
 {
 	struct rtable *rt;			/* Route to the other host */
+	__be32 saddr;				/* Source for tunnel */
 	struct net_device *tdev;		/* Device to other host */
 	struct iphdr  *old_iph = ip_hdr(skb);
 	u8     tos = old_iph->tos;
@@ -761,7 +781,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
 				      RT_TOS(tos), IP_VS_RT_MODE_LOCAL |
-						   IP_VS_RT_MODE_NON_LOCAL)))
+						   IP_VS_RT_MODE_NON_LOCAL,
+						   &saddr)))
 		goto tx_error_icmp;
 	if (rt->rt_flags & RTCF_LOCAL) {
 		ip_rt_put(rt);
@@ -829,8 +850,8 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	iph->frag_off		=	df;
 	iph->protocol		=	IPPROTO_IPIP;
 	iph->tos		=	tos;
-	iph->daddr		=	rt->rt_dst;
-	iph->saddr		=	rt->rt_src;
+	iph->daddr		=	cp->daddr.ip;
+	iph->saddr		=	saddr;
 	iph->ttl		=	old_iph->ttl;
 	ip_select_ident(iph, &rt->dst, NULL);
 
@@ -875,7 +896,8 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	EnterFunction(10);
 
 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
-					 &saddr, 1, 1|2)))
+					 &saddr, 1, (IP_VS_RT_MODE_LOCAL |
+						     IP_VS_RT_MODE_NON_LOCAL))))
 		goto tx_error_icmp;
 	if (__ip_vs_is_local_route6(rt)) {
 		dst_release(&rt->dst);
@@ -992,7 +1014,7 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
 				      RT_TOS(iph->tos),
 				      IP_VS_RT_MODE_LOCAL |
-				      IP_VS_RT_MODE_NON_LOCAL)))
+				      IP_VS_RT_MODE_NON_LOCAL, NULL)))
 		goto tx_error_icmp;
 	if (rt->rt_flags & RTCF_LOCAL) {
 		ip_rt_put(rt);
@@ -1050,7 +1072,8 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	EnterFunction(10);
 
 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
-					 0, 1|2)))
+					 0, (IP_VS_RT_MODE_LOCAL |
+					     IP_VS_RT_MODE_NON_LOCAL))))
 		goto tx_error_icmp;
 	if (__ip_vs_is_local_route6(rt)) {
 		dst_release(&rt->dst);
@@ -1109,12 +1132,13 @@ tx_error:
  */
 int
 ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
-		struct ip_vs_protocol *pp, int offset)
+		struct ip_vs_protocol *pp, int offset, unsigned int hooknum)
 {
 	struct rtable	*rt;	/* Route to the other host */
 	int mtu;
 	int rc;
 	int local;
+	int rt_mode;
 
 	EnterFunction(10);
 
@@ -1135,11 +1159,13 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 	 * mangle and send the packet here (only for VS/NAT)
 	 */
 
+	/* LOCALNODE from FORWARD hook is not supported */
+	rt_mode = (hooknum != NF_INET_FORWARD) ?
+		  IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
+		  IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
 	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
 				      RT_TOS(ip_hdr(skb)->tos),
-				      IP_VS_RT_MODE_LOCAL |
-				      IP_VS_RT_MODE_NON_LOCAL |
-				      IP_VS_RT_MODE_RDR)))
+				      rt_mode, NULL)))
 		goto tx_error_icmp;
 	local = rt->rt_flags & RTCF_LOCAL;
 
@@ -1162,7 +1188,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 #endif
 
 	/* From world but DNAT to loopback address? */
-	if (local && ipv4_is_loopback(rt->rt_dst) &&
+	if (local && ipv4_is_loopback(cp->daddr.ip) &&
 	    rt_is_input_route(skb_rtable(skb))) {
 		IP_VS_DBG(1, "%s(): "
 			  "stopping DNAT to loopback %pI4\n",
@@ -1227,12 +1253,13 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
 #ifdef CONFIG_IP_VS_IPV6
 int
 ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
-		   struct ip_vs_protocol *pp, int offset)
+		   struct ip_vs_protocol *pp, int offset, unsigned int hooknum)
 {
 	struct rt6_info	*rt;	/* Route to the other host */
 	int mtu;
 	int rc;
 	int local;
+	int rt_mode;
 
 	EnterFunction(10);
 
@@ -1253,8 +1280,12 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
 	 * mangle and send the packet here (only for VS/NAT)
 	 */
 
+	/* LOCALNODE from FORWARD hook is not supported */
+	rt_mode = (hooknum != NF_INET_FORWARD) ?
+		  IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
+		  IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
 	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
-					 0, 1|2|4)))
+					 0, rt_mode)))
 		goto tx_error_icmp;
 
 	local = __ip_vs_is_local_route6(rt);
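
Annotation: the IP_VS_RT_MODE_* names substituted for the bare 1|2|4 masks above follow the bit assignments spelled out in the comment deleted from __ip_vs_get_out_rt_v6(). Inferred definitions (the real ones live near the top of ip_vs_xmit.c):

	#define IP_VS_RT_MODE_LOCAL	0x0001	/* &1: allow local dest */
	#define IP_VS_RT_MODE_NON_LOCAL	0x0002	/* &2: allow non-local dest */
	#define IP_VS_RT_MODE_RDR	0x0004	/* &4: allow redirect from remote
						 *     daddr to local */

Passing hooknum down lets ip_vs_icmp_xmit()/ip_vs_icmp_xmit_v6() pick the permissive mask everywhere except NF_INET_FORWARD, replacing the after-the-fact NF_DROP checks removed from ip_vs_in_icmp() and ip_vs_in_icmp_v6().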
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 33d9ee629b4e..6766913a53e6 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1496,7 +1496,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 	struct sctp_chunk *chunk;
 	union sctp_addr to;
 	struct sockaddr *msg_name = NULL;
-	struct sctp_sndrcvinfo default_sinfo = { 0 };
+	struct sctp_sndrcvinfo default_sinfo;
 	struct sctp_sndrcvinfo *sinfo;
 	struct sctp_initmsg *sinit;
 	sctp_assoc_t associd = 0;
@@ -1760,6 +1760,7 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 	/* If the user didn't specify SNDRCVINFO, make up one with
 	 * some defaults.
 	 */
+	memset(&default_sinfo, 0, sizeof(default_sinfo));
 	default_sinfo.sinfo_stream = asoc->default_stream;
 	default_sinfo.sinfo_flags = asoc->default_flags;
 	default_sinfo.sinfo_ppid = asoc->default_ppid;
@@ -1790,12 +1791,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
 		goto out_free;
 	}
 
-	if (sinfo) {
-		/* Check for invalid stream. */
-		if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
-			err = -EINVAL;
-			goto out_free;
-		}
+	/* Check for invalid stream. */
+	if (sinfo->sinfo_stream >= asoc->c.sinit_num_ostreams) {
+		err = -EINVAL;
+		goto out_free;
 	}
 
 	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);