author		David S. Miller <davem@davemloft.net>	2011-05-12 23:01:55 -0400
committer	David S. Miller <davem@davemloft.net>	2011-05-12 23:01:55 -0400
commit		5c5095494fb545f53b80cbb7539679a10a3472a6 (patch)
tree		d7c40cd66a58030ddef369bcb9acd8d95e2ac864 /drivers/net
parent		4d586b823acc46c55c889ae1798de236c9d403da (diff)
parent		def57687e9579b7a797681990dff763c411f5347 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-next-2.6
Diffstat (limited to 'drivers/net')
-rw-r--r--	drivers/net/bonding/bond_main.c		157
-rw-r--r--	drivers/net/tg3.c			 69
-rw-r--r--	drivers/net/usb/cdc_ncm.c		 13
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_drv.c	  3
-rw-r--r--	drivers/net/vmxnet3/vmxnet3_int.h	  4
5 files changed, 146 insertions, 100 deletions
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 6312db1f783..088fd845ffd 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -344,32 +344,6 @@ out:
 }
 
 /**
- * bond_has_challenged_slaves
- * @bond: the bond we're working on
- *
- * Searches the slave list. Returns 1 if a vlan challenged slave
- * was found, 0 otherwise.
- *
- * Assumes bond->lock is held.
- */
-static int bond_has_challenged_slaves(struct bonding *bond)
-{
-	struct slave *slave;
-	int i;
-
-	bond_for_each_slave(bond, slave, i) {
-		if (slave->dev->features & NETIF_F_VLAN_CHALLENGED) {
-			pr_debug("found VLAN challenged slave - %s\n",
-				 slave->dev->name);
-			return 1;
-		}
-	}
-
-	pr_debug("no VLAN challenged slaves found\n");
-	return 0;
-}
-
-/**
  * bond_next_vlan - safely skip to the next item in the vlans list.
  * @bond: the bond we're working on
  * @curr: item we're advancing from
@@ -1406,52 +1380,68 @@ static int bond_sethwaddr(struct net_device *bond_dev,
 	return 0;
 }
 
-#define BOND_VLAN_FEATURES \
-	(NETIF_F_VLAN_CHALLENGED | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX | \
-	 NETIF_F_HW_VLAN_FILTER)
-
-/*
- * Compute the common dev->feature set available to all slaves. Some
- * feature bits are managed elsewhere, so preserve those feature bits
- * on the master device.
- */
-static int bond_compute_features(struct bonding *bond)
+static u32 bond_fix_features(struct net_device *dev, u32 features)
 {
 	struct slave *slave;
-	struct net_device *bond_dev = bond->dev;
-	u32 features = bond_dev->features;
-	u32 vlan_features = 0;
-	unsigned short max_hard_header_len = max((u16)ETH_HLEN,
-						 bond_dev->hard_header_len);
+	struct bonding *bond = netdev_priv(dev);
+	u32 mask;
 	int i;
 
-	features &= ~(NETIF_F_ALL_CSUM | BOND_VLAN_FEATURES);
-	features |= NETIF_F_GSO_MASK | NETIF_F_NO_CSUM | NETIF_F_NOCACHE_COPY;
+	read_lock(&bond->lock);
 
-	if (!bond->first_slave)
-		goto done;
+	if (!bond->first_slave) {
+		/* Disable adding VLANs to empty bond. But why? --mq */
+		features |= NETIF_F_VLAN_CHALLENGED;
+		goto out;
+	}
 
+	mask = features;
 	features &= ~NETIF_F_ONE_FOR_ALL;
+	features |= NETIF_F_ALL_FOR_ALL;
 
-	vlan_features = bond->first_slave->dev->vlan_features;
 	bond_for_each_slave(bond, slave, i) {
 		features = netdev_increment_features(features,
 						     slave->dev->features,
-						     NETIF_F_ONE_FOR_ALL);
+						     mask);
+	}
+
+out:
+	read_unlock(&bond->lock);
+	return features;
+}
+
+#define BOND_VLAN_FEATURES	(NETIF_F_ALL_TX_OFFLOADS | \
+				 NETIF_F_SOFT_FEATURES | \
+				 NETIF_F_LRO)
+
+static void bond_compute_features(struct bonding *bond)
+{
+	struct slave *slave;
+	struct net_device *bond_dev = bond->dev;
+	u32 vlan_features = BOND_VLAN_FEATURES;
+	unsigned short max_hard_header_len = ETH_HLEN;
+	int i;
+
+	read_lock(&bond->lock);
+
+	if (!bond->first_slave)
+		goto done;
+
+	bond_for_each_slave(bond, slave, i) {
 		vlan_features = netdev_increment_features(vlan_features,
-							  slave->dev->vlan_features,
-							  NETIF_F_ONE_FOR_ALL);
+			slave->dev->vlan_features, BOND_VLAN_FEATURES);
+
 		if (slave->dev->hard_header_len > max_hard_header_len)
 			max_hard_header_len = slave->dev->hard_header_len;
 	}
 
 done:
-	features |= (bond_dev->features & BOND_VLAN_FEATURES);
-	bond_dev->features = netdev_fix_features(bond_dev, features);
-	bond_dev->vlan_features = netdev_fix_features(bond_dev, vlan_features);
+	bond_dev->vlan_features = vlan_features;
 	bond_dev->hard_header_len = max_hard_header_len;
 
-	return 0;
+	read_unlock(&bond->lock);
+
+	netdev_change_features(bond_dev);
 }
 
 static void bond_setup_by_slave(struct net_device *bond_dev,
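The hunk above replaces the old hand-rolled recomputation of bond_dev->features with an ndo_fix_features hook: the bond folds the feature bits of every slave into one mask and lets the core commit the result. The standalone toy below (not the kernel's netdev_increment_features(), whose real grouping lives in NETIF_F_ONE_FOR_ALL and NETIF_F_ALL_FOR_ALL) only illustrates the folding idea; the flag names and the two masks are made up for the example.

```c
/* Toy fold of per-slave feature bits: some bits survive only if every slave
 * has them, others are kept if any slave has them. Illustrative names only. */
#include <stdint.h>
#include <stdio.h>

#define F_SG       (1u << 0)
#define F_CSUM     (1u << 1)
#define F_HIGHDMA  (1u << 2)

#define NEED_ALL_SLAVES  (F_CSUM)             /* ANDed across slaves */
#define ANY_SLAVE_IS_OK  (F_SG | F_HIGHDMA)   /* ORed across slaves  */

static uint32_t fold_slave_features(const uint32_t *slave, int n)
{
	uint32_t all_have = ~0u;
	uint32_t any_has = 0;
	int i;

	for (i = 0; i < n; i++) {
		all_have &= slave[i];
		any_has  |= slave[i];
	}

	return (all_have & NEED_ALL_SLAVES) | (any_has & ANY_SLAVE_IS_OK);
}

int main(void)
{
	uint32_t slaves[] = {
		F_SG | F_CSUM | F_HIGHDMA,  /* slave 0: full-featured   */
		F_SG,                       /* slave 1: no csum, no DMA */
	};

	/* F_CSUM drops out (slave 1 lacks it); F_SG and F_HIGHDMA remain. */
	printf("bond features: 0x%x\n", fold_slave_features(slaves, 2));
	return 0;
}
```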
@@ -1544,7 +1534,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	struct netdev_hw_addr *ha;
 	struct sockaddr addr;
 	int link_reporting;
-	int old_features = bond_dev->features;
 	int res = 0;
 
 	if (!bond->params.use_carrier && slave_dev->ethtool_ops == NULL &&
@@ -1577,16 +1566,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 			pr_warning("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
 				   bond_dev->name, slave_dev->name,
 				   slave_dev->name, bond_dev->name);
-			bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
 		}
 	} else {
 		pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
-		if (bond->slave_cnt == 0) {
-			/* First slave, and it is not VLAN challenged,
-			 * so remove the block of adding VLANs over the bond.
-			 */
-			bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
-		}
 	}
 
 	/*
@@ -1775,10 +1757,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	new_slave->delay = 0;
 	new_slave->link_failure_count = 0;
 
-	bond_compute_features(bond);
-
 	write_unlock_bh(&bond->lock);
 
+	bond_compute_features(bond);
+
 	read_lock(&bond->lock);
 
 	new_slave->last_arp_rx = jiffies;
@@ -1958,7 +1940,7 @@ err_free:
 	kfree(new_slave);
 
 err_undo_flags:
-	bond_dev->features = old_features;
+	bond_compute_features(bond);
 
 	return res;
 }
@@ -1979,6 +1961,7 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave, *oldcurrent;
 	struct sockaddr addr;
+	u32 old_features = bond_dev->features;
 
 	/* slave is not a slave or master is not master of this slave */
 	if (!(slave_dev->flags & IFF_SLAVE) ||
@@ -2039,8 +2022,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 	/* release the slave from its bond */
 	bond_detach_slave(bond, slave);
 
-	bond_compute_features(bond);
-
 	if (bond->primary_slave == slave)
 		bond->primary_slave = NULL;
 
@@ -2084,24 +2065,23 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 		 */
 		memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-		if (!bond->vlgrp) {
-			bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
-		} else {
+		if (bond->vlgrp) {
 			pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
 				   bond_dev->name, bond_dev->name);
 			pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
 				   bond_dev->name);
 		}
-	} else if ((bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
-		   !bond_has_challenged_slaves(bond)) {
-		pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
-			bond_dev->name, slave_dev->name, bond_dev->name);
-		bond_dev->features &= ~NETIF_F_VLAN_CHALLENGED;
 	}
 
 	write_unlock_bh(&bond->lock);
 	unblock_netpoll_tx();
 
+	bond_compute_features(bond);
+	if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
+	    (old_features & NETIF_F_VLAN_CHALLENGED))
+		pr_info("%s: last VLAN challenged slave %s left bond %s. VLAN blocking is removed\n",
+			bond_dev->name, slave_dev->name, bond_dev->name);
+
 	/* must do this from outside any spinlocks */
 	bond_destroy_slave_symlinks(bond_dev, slave_dev);
 
@@ -2219,8 +2199,6 @@ static int bond_release_all(struct net_device *bond_dev)
 			bond_alb_deinit_slave(bond, slave);
 		}
 
-		bond_compute_features(bond);
-
 		bond_destroy_slave_symlinks(bond_dev, slave_dev);
 		bond_del_vlans_from_slave(bond, slave_dev);
 
@@ -2269,9 +2247,7 @@
 	 */
 	memset(bond_dev->dev_addr, 0, bond_dev->addr_len);
 
-	if (!bond->vlgrp) {
-		bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
-	} else {
+	if (bond->vlgrp) {
 		pr_warning("%s: Warning: clearing HW address of %s while it still has VLANs.\n",
 			   bond_dev->name, bond_dev->name);
 		pr_warning("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs'.\n",
@@ -2282,6 +2258,9 @@ static int bond_release_all(struct net_device *bond_dev)
 
 out:
 	write_unlock_bh(&bond->lock);
+
+	bond_compute_features(bond);
+
 	return 0;
 }
 
@@ -4337,11 +4316,6 @@ static void bond_ethtool_get_drvinfo(struct net_device *bond_dev,
 static const struct ethtool_ops bond_ethtool_ops = {
 	.get_drvinfo = bond_ethtool_get_drvinfo,
 	.get_link = ethtool_op_get_link,
-	.get_tx_csum = ethtool_op_get_tx_csum,
-	.get_sg = ethtool_op_get_sg,
-	.get_tso = ethtool_op_get_tso,
-	.get_ufo = ethtool_op_get_ufo,
-	.get_flags = ethtool_op_get_flags,
 };
 
 static const struct net_device_ops bond_netdev_ops = {
@@ -4367,6 +4341,7 @@ static const struct net_device_ops bond_netdev_ops = {
 #endif
 	.ndo_add_slave = bond_enslave,
 	.ndo_del_slave = bond_release,
+	.ndo_fix_features = bond_fix_features,
 };
 
 static void bond_destructor(struct net_device *bond_dev)
@@ -4422,14 +4397,14 @@ static void bond_setup(struct net_device *bond_dev)
 	 * when there are slaves that are not hw accel
 	 * capable
 	 */
-	bond_dev->features |= (NETIF_F_HW_VLAN_TX |
-			       NETIF_F_HW_VLAN_RX |
-			       NETIF_F_HW_VLAN_FILTER);
 
-	/* By default, we enable GRO on bonding devices.
-	 * Actual support requires lowlevel drivers are GRO ready.
-	 */
-	bond_dev->features |= NETIF_F_GRO;
+	bond_dev->hw_features = BOND_VLAN_FEATURES |
+				NETIF_F_HW_VLAN_TX |
+				NETIF_F_HW_VLAN_RX |
+				NETIF_F_HW_VLAN_FILTER;
+
+	bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_NO_CSUM);
+	bond_dev->features |= bond_dev->hw_features;
 }
 
 static void bond_work_cancel_all(struct bonding *bond)
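With the bond_setup() hunk above, the bond stops forcing bits into dev->features directly and instead advertises the toggleable set in hw_features, leaving the per-request policing to the new .ndo_fix_features hook. The sketch below is a toy model of that propose-fix-commit contract, not kernel code; fake_dev, demo_fix_features and update_features are invented names, and the flag set is illustrative only.

```c
/* Toy hw_features / fix_features contract: mask the request by what is
 * toggleable, let the driver hook drop impossible combinations, then commit. */
#include <stdint.h>
#include <stdio.h>

#define F_SG   (1u << 0)    /* scatter/gather          */
#define F_TSO  (1u << 1)    /* TSO, only valid with SG */
#define F_LRO  (1u << 2)    /* large receive offload   */

struct fake_dev {
	uint32_t hw_features;  /* user-changeable bits */
	uint32_t features;     /* currently active bits */
	uint32_t (*fix_features)(struct fake_dev *dev, uint32_t features);
};

/* Driver hook in the spirit of bond_fix_features()/tg3_fix_features(). */
static uint32_t demo_fix_features(struct fake_dev *dev, uint32_t features)
{
	(void)dev;
	if (!(features & F_SG))
		features &= ~F_TSO;   /* TSO cannot work without SG */
	return features;
}

/* Loosely modelled on the core's feature-update sequence. */
static void update_features(struct fake_dev *dev, uint32_t wanted)
{
	uint32_t features = wanted & dev->hw_features;

	if (dev->fix_features)
		features = dev->fix_features(dev, features);

	if (features != dev->features) {
		printf("features 0x%x -> 0x%x\n", dev->features, features);
		dev->features = features;
	}
}

int main(void)
{
	struct fake_dev dev = {
		.hw_features  = F_SG | F_TSO | F_LRO,
		.features     = F_SG | F_TSO,
		.fix_features = demo_fix_features,
	};

	/* User asks for TSO+LRO without SG: the hook strips TSO. */
	update_features(&dev, F_TSO | F_LRO);
	return 0;
}
```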
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ec195304310..d5a1f9e3794 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -3373,8 +3373,8 @@ relink:
 		tg3_phy_copper_begin(tp);
 
 		tg3_readphy(tp, MII_BMSR, &bmsr);
-		if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
-		    (bmsr & BMSR_LSTATUS))
+		if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
+		    (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
 			current_link_up = 1;
 	}
 
@@ -6309,6 +6309,42 @@ dma_error:
 	return NETDEV_TX_OK;
 }
 
+static void tg3_set_loopback(struct net_device *dev, u32 features)
+{
+	struct tg3 *tp = netdev_priv(dev);
+
+	if (features & NETIF_F_LOOPBACK) {
+		if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
+			return;
+
+		/*
+		 * Clear MAC_MODE_HALF_DUPLEX or you won't get packets back in
+		 * loopback mode if Half-Duplex mode was negotiated earlier.
+		 */
+		tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
+
+		/* Enable internal MAC loopback mode */
+		tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
+		spin_lock_bh(&tp->lock);
+		tw32(MAC_MODE, tp->mac_mode);
+		netif_carrier_on(tp->dev);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode enabled.\n");
+	} else {
+		if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
+			return;
+
+		/* Disable internal MAC loopback mode */
+		tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
+		spin_lock_bh(&tp->lock);
+		tw32(MAC_MODE, tp->mac_mode);
+		/* Force link status check */
+		tg3_setup_phy(tp, 1);
+		spin_unlock_bh(&tp->lock);
+		netdev_info(dev, "Internal MAC loopback mode disabled.\n");
+	}
+}
+
 static u32 tg3_fix_features(struct net_device *dev, u32 features)
 {
 	struct tg3 *tp = netdev_priv(dev);
@@ -6319,6 +6355,16 @@ static u32 tg3_fix_features(struct net_device *dev, u32 features)
 	return features;
 }
 
+static int tg3_set_features(struct net_device *dev, u32 features)
+{
+	u32 changed = dev->features ^ features;
+
+	if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
+		tg3_set_loopback(dev, features);
+
+	return 0;
+}
+
 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
 			       int new_mtu)
 {
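tg3_set_features() above uses a common set_features pattern: XOR the currently active feature bits with the requested ones and act only on the bits that actually flipped. The standalone sketch below restates that pattern outside the kernel; the toy_dev struct, the flag values and toy_set_loopback are invented for illustration.

```c
/* XOR-based changed-bit detection, as in an ndo_set_features()-style hook. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define F_RXCSUM    (1u << 0)
#define F_LOOPBACK  (1u << 1)

struct toy_dev {
	uint32_t features;
	bool running;
};

static void toy_set_loopback(struct toy_dev *dev, bool enable)
{
	(void)dev;
	printf("loopback %s\n", enable ? "enabled" : "disabled");
}

/* React only to the bits that changed relative to the current state. */
static int toy_set_features(struct toy_dev *dev, uint32_t wanted)
{
	uint32_t changed = dev->features ^ wanted;

	if ((changed & F_LOOPBACK) && dev->running)
		toy_set_loopback(dev, wanted & F_LOOPBACK);

	dev->features = wanted;
	return 0;
}

int main(void)
{
	struct toy_dev dev = { .features = F_RXCSUM, .running = true };

	toy_set_features(&dev, F_RXCSUM | F_LOOPBACK); /* flips loopback on  */
	toy_set_features(&dev, F_RXCSUM | F_LOOPBACK); /* no change, no work */
	return 0;
}
```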
@@ -9485,6 +9531,13 @@ static int tg3_open(struct net_device *dev)
 
 	netif_tx_start_all_queues(dev);
 
+	/*
+	 * Reset loopback feature if it was turned on while the device was down
+	 * make sure that it's installed properly now.
+	 */
+	if (dev->features & NETIF_F_LOOPBACK)
+		tg3_set_loopback(dev, dev->features);
+
 	return 0;
 
 err_out3:
@@ -15033,6 +15086,7 @@ static const struct net_device_ops tg3_netdev_ops = {
 	.ndo_tx_timeout = tg3_tx_timeout,
 	.ndo_change_mtu = tg3_change_mtu,
 	.ndo_fix_features = tg3_fix_features,
+	.ndo_set_features = tg3_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = tg3_poll_controller,
 #endif
@@ -15049,6 +15103,7 @@ static const struct net_device_ops tg3_netdev_ops_dma_bug = {
 	.ndo_do_ioctl = tg3_ioctl,
 	.ndo_tx_timeout = tg3_tx_timeout,
 	.ndo_change_mtu = tg3_change_mtu,
+	.ndo_set_features = tg3_set_features,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = tg3_poll_controller,
 #endif
@@ -15246,6 +15301,16 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
 	dev->features |= hw_features;
 	dev->vlan_features |= hw_features;
 
+	/*
+	 * Add loopback capability only for a subset of devices that support
+	 * MAC-LOOPBACK. Eventually this need to be enhanced to allow INT-PHY
+	 * loopback for the remaining devices.
+	 */
+	if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
+	    !tg3_flag(tp, CPMU_PRESENT))
+		/* Add the loopback capability */
+		dev->hw_features |= NETIF_F_LOOPBACK;
+
 	if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
 	    !tg3_flag(tp, TSO_CAPABLE) &&
 	    !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index 1033ef6476a..4ab557d0287 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -54,13 +54,13 @@
 #include <linux/usb/usbnet.h>
 #include <linux/usb/cdc.h>
 
-#define DRIVER_VERSION "23-Apr-2011"
+#define DRIVER_VERSION "06-May-2011"
 
 /* CDC NCM subclass 3.2.1 */
 #define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
 
 /* Maximum NTB length */
-#define CDC_NCM_NTB_MAX_SIZE_TX (16384 + 4) /* bytes, must be short terminated */
+#define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */
 #define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */
 
 /* Minimum value for MaxDatagramSize, ch. 6.2.9 */
@@ -722,7 +722,7 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 
 	} else {
 		/* reset variables */
-		skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
+		skb_out = alloc_skb((ctx->tx_max + 1), GFP_ATOMIC);
 		if (skb_out == NULL) {
 			if (skb != NULL) {
 				dev_kfree_skb_any(skb);
@@ -861,8 +861,11 @@ cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
 	/* store last offset */
 	last_offset = offset;
 
-	if ((last_offset < ctx->tx_max) && ((last_offset %
-			le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
+	if (((last_offset < ctx->tx_max) && ((last_offset %
+			le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) ||
+	    (((last_offset == ctx->tx_max) && ((ctx->tx_max %
+			le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) &&
+	     (ctx->tx_max < le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)))) {
 		/* force short packet */
 		*(((u8 *)skb_out->data) + last_offset) = 0;
 		last_offset++;
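The widened test above forces a one-byte pad when the finished NTB would end exactly on a wMaxPacketSize boundary, because a USB bulk transfer whose length is an exact multiple of the endpoint packet size is not terminated by a short packet on the wire; the extra case covers an NTB that fills tx_max while the padded size still fits the device's advertised dwNtbOutMaxSize (which also explains the alloc_skb(tx_max + 1) change earlier in the file). The helper below restates that condition with plain integers; the function name and the sample values are illustrative only.

```c
/* Standalone restatement of the "force short packet" test in the hunk above. */
#include <stdbool.h>
#include <stdio.h>

static bool needs_short_packet_pad(unsigned last_offset, unsigned tx_max,
				   unsigned max_packet, unsigned ntb_out_max)
{
	/* NTB ends early but exactly on a packet boundary. */
	if (last_offset < tx_max && (last_offset % max_packet) == 0)
		return true;

	/* NTB fills tx_max, tx_max itself is a packet multiple, and one
	 * extra pad byte still fits under the device's max NTB size. */
	if (last_offset == tx_max && (tx_max % max_packet) == 0 &&
	    tx_max < ntb_out_max)
		return true;

	return false;
}

int main(void)
{
	/* 16384-byte NTB on a 512-byte bulk endpoint (example values). */
	printf("%d\n", needs_short_packet_pad(16384, 16384, 512, 16388)); /* 1 */
	printf("%d\n", needs_short_packet_pad(15000, 16384, 512, 16388)); /* 0 */
	return 0;
}
```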
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index c0da2309616..fa6e2ac7475 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2884,6 +2884,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
 	int num_tx_queues;
 	int num_rx_queues;
 
+	if (!pci_msi_enabled())
+		enable_mq = 0;
+
 #ifdef VMXNET3_RSS
 	if (enable_mq)
 		num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 8ba7b5f67de..f50d36fdf40 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,10 +68,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING "1.0.25.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.1.9.0-k"
 
 /* a 32-bit int, each byte encode a verion number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM 0x01001900
+#define VMXNET3_DRIVER_VERSION_NUM 0x01010900
 
 #if defined(CONFIG_PCI_MSI)
 /* RSS only makes sense if MSI-X is supported. */
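Per the comment in the hunk above, each byte of VMXNET3_DRIVER_VERSION_NUM encodes one component of the version string, so the bump from "1.0.25.0-k" to "1.1.9.0-k" moves 0x01001900 (1.0.25.0, with 25 == 0x19) to 0x01010900 (1.1.9.0). The small sketch below just checks that packing; pack_version is an illustrative helper, not driver code.

```c
/* Pack four version components into one 32-bit value, one byte each. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_version(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
{
	return ((uint32_t)a << 24) | ((uint32_t)b << 16) |
	       ((uint32_t)c << 8)  | (uint32_t)d;
}

int main(void)
{
	printf("0x%08x\n", pack_version(1, 0, 25, 0)); /* 0x01001900 */
	printf("0x%08x\n", pack_version(1, 1, 9, 0));  /* 0x01010900 */
	return 0;
}
```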