-rw-r--r--  drivers/bluetooth/bluecard_cs.c              |   2
-rw-r--r--  drivers/bluetooth/hci_bcsp.c                 |   2
-rw-r--r--  drivers/net/bonding/bond_alb.c               |   3
-rw-r--r--  drivers/net/bonding/bond_main.c              |  33
-rw-r--r--  drivers/net/ixgbe/ixgbe_main.c               |  17
-rw-r--r--  drivers/net/ll_temac_main.c                  |  18
-rw-r--r--  drivers/net/mv643xx_eth.c                    |   9
-rw-r--r--  drivers/net/ne.c                             |   4
-rw-r--r--  drivers/net/qlge/qlge_main.c                 |  11
-rw-r--r--  drivers/net/s2io.c                           | 101
-rw-r--r--  drivers/net/s2io.h                           |   4
-rw-r--r--  drivers/net/sb1250-mac.c                     |   1
-rw-r--r--  drivers/net/usb/rndis_host.c                 |  18
-rw-r--r--  drivers/net/usb/usbnet.c                     |   5
-rw-r--r--  drivers/net/virtio_net.c                     |  28
-rw-r--r--  drivers/net/vxge/vxge-main.c                 |   4
-rw-r--r--  drivers/net/wireless/ath/ath9k/ath9k.h       |   1
-rw-r--r--  drivers/net/wireless/ath/ath9k/main.c        |  11
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c  |   6
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-core.c      |   7
-rw-r--r--  drivers/vhost/net.c                          |  12
-rw-r--r--  drivers/vhost/vhost.c                        |  86
-rw-r--r--  drivers/vhost/vhost.h                        |   8
-rw-r--r--  include/linux/ethtool.h                      |   2
-rw-r--r--  include/linux/mv643xx_eth.h                  |   5
-rw-r--r--  include/linux/net.h                          |   3
-rw-r--r--  include/linux/netdevice.h                    |   5
-rw-r--r--  include/net/sch_generic.h                    |  20
-rw-r--r--  include/net/xfrm.h                           |   2
-rw-r--r--  net/bridge/br_multicast.c                    |  21
-rw-r--r--  net/bridge/br_netfilter.c                    |   3
-rw-r--r--  net/core/dev.c                               |  18
-rw-r--r--  net/core/ethtool.c                           |  41
-rw-r--r--  net/ipv4/xfrm4_policy.c                      |   2
-rw-r--r--  net/ipv6/netfilter/ip6t_REJECT.c             |   6
-rw-r--r--  net/ipv6/xfrm6_policy.c                      |   2
36 files changed, 332 insertions, 189 deletions
diff --git a/drivers/bluetooth/bluecard_cs.c b/drivers/bluetooth/bluecard_cs.c
index 6f907ebed2d5..6d34f405a2f3 100644
--- a/drivers/bluetooth/bluecard_cs.c
+++ b/drivers/bluetooth/bluecard_cs.c
@@ -37,7 +37,7 @@
 #include <linux/wait.h>
 
 #include <linux/skbuff.h>
-#include <asm/io.h>
+#include <linux/io.h>
 
 #include <pcmcia/cs_types.h>
 #include <pcmcia/cs.h>
diff --git a/drivers/bluetooth/hci_bcsp.c b/drivers/bluetooth/hci_bcsp.c
index 40aec0fb8596..42d69d4de05c 100644
--- a/drivers/bluetooth/hci_bcsp.c
+++ b/drivers/bluetooth/hci_bcsp.c
@@ -244,7 +244,7 @@ static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data,
 	if (rel) {
 		hdr[0] |= 0x80 + bcsp->msgq_txseq;
 		BT_DBG("Sending packet with seqno %u", bcsp->msgq_txseq);
-		bcsp->msgq_txseq = ++(bcsp->msgq_txseq) & 0x07;
+		bcsp->msgq_txseq = (bcsp->msgq_txseq + 1) & 0x07;
 	}
 
 	if (bcsp->use_crc)
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 40fdc41446cc..df483076eda6 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -340,7 +340,8 @@ static void rlb_update_entry_from_arp(struct bonding *bond, struct arp_pkt *arp)
 
 	if ((client_info->assigned) &&
 	    (client_info->ip_src == arp->ip_dst) &&
-	    (client_info->ip_dst == arp->ip_src)) {
+	    (client_info->ip_dst == arp->ip_src) &&
+	    (compare_ether_addr_64bits(client_info->mac_dst, arp->mac_src))) {
 		/* update the clients MAC address */
 		memcpy(client_info->mac_dst, arp->mac_src, ETH_ALEN);
 		client_info->ntt = 1;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 5e12462a9d5e..c3d98dde2f86 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -168,7 +168,7 @@ static int arp_ip_count;
 static int bond_mode	= BOND_MODE_ROUNDROBIN;
 static int xmit_hashtype = BOND_XMIT_POLICY_LAYER2;
 static int lacp_fast;
-
+static int disable_netpoll = 1;
 
 const struct bond_parm_tbl bond_lacp_tbl[] = {
 {	"slow",		AD_LACP_SLOW},
@@ -1742,15 +1742,23 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 	bond_set_carrier(bond);
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-	if (slaves_support_netpoll(bond_dev)) {
-		bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
-		if (bond_dev->npinfo)
-			slave_dev->npinfo = bond_dev->npinfo;
-	} else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+	/*
+	 * Netpoll and bonding is broken, make sure it is not initialized
+	 * until it is fixed.
+	 */
+	if (disable_netpoll) {
 		bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
-		pr_info("New slave device %s does not support netpoll\n",
-			slave_dev->name);
-		pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+	} else {
+		if (slaves_support_netpoll(bond_dev)) {
+			bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+			if (bond_dev->npinfo)
+				slave_dev->npinfo = bond_dev->npinfo;
+		} else if (!(bond_dev->priv_flags & IFF_DISABLE_NETPOLL)) {
+			bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
+			pr_info("New slave device %s does not support netpoll\n",
+				slave_dev->name);
+			pr_info("Disabling netpoll support for %s\n", bond_dev->name);
+		}
 	}
 #endif
 	read_unlock(&bond->lock);
@@ -1950,8 +1958,11 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	read_lock_bh(&bond->lock);
-	if (slaves_support_netpoll(bond_dev))
-		bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
+
+	/* Make sure netpoll over stays disabled until fixed. */
+	if (!disable_netpoll)
+		if (slaves_support_netpoll(bond_dev))
+			bond_dev->priv_flags &= ~IFF_DISABLE_NETPOLL;
 	read_unlock_bh(&bond->lock);
 	if (slave_dev->netdev_ops->ndo_netpoll_cleanup)
 		slave_dev->netdev_ops->ndo_netpoll_cleanup(slave_dev);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index ce30c62a97f7..7b5d9764f317 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3684,10 +3684,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 	/* signal that we are down to the interrupt handler */
 	set_bit(__IXGBE_DOWN, &adapter->state);
 
-	/* power down the optics */
-	if (hw->phy.multispeed_fiber)
-		hw->mac.ops.disable_tx_laser(hw);
-
 	/* disable receive for all VFs and wait one second */
 	if (adapter->num_vfs) {
 		/* ping all the active vfs to let them know we are going down */
@@ -3742,6 +3738,10 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 			(IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
 			 ~IXGBE_DMATXCTL_TE));
 
+	/* power down the optics */
+	if (hw->phy.multispeed_fiber)
+		hw->mac.ops.disable_tx_laser(hw);
+
 	/* clear n-tuple filters that are cached */
 	ethtool_ntuple_flush(netdev);
 
@@ -4001,7 +4001,7 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter)
 
 done:
 	/* Notify the stack of the (possibly) reduced Tx Queue count. */
-	adapter->netdev->real_num_tx_queues = adapter->num_tx_queues;
+	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
 }
 
 static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
@@ -5195,7 +5195,6 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 		ixgbe_free_all_tx_resources(adapter);
 		ixgbe_free_all_rx_resources(adapter);
 	}
-	ixgbe_clear_interrupt_scheme(adapter);
 
 #ifdef CONFIG_PM
 	retval = pci_save_state(pdev);
@@ -5230,6 +5229,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 
 	*enable_wake = !!wufc;
 
+	ixgbe_clear_interrupt_scheme(adapter);
+
 	ixgbe_release_hw_control(adapter);
 
 	pci_disable_device(pdev);
@@ -6023,7 +6024,6 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 		      int queue, u32 tx_flags)
 {
-	/* Right now, we support IPv4 only */
 	struct ixgbe_atr_input atr_input;
 	struct tcphdr *th;
 	struct iphdr *iph = ip_hdr(skb);
@@ -6032,6 +6032,9 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
 	u32 src_ipv4_addr, dst_ipv4_addr;
 	u8 l4type = 0;
 
+	/* Right now, we support IPv4 only */
+	if (skb->protocol != htons(ETH_P_IP))
+		return;
 	/* check if we're UDP or TCP */
 	if (iph->protocol == IPPROTO_TCP) {
 		th = tcp_hdr(skb);
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 52dcc8495647..6474c4973d3a 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -964,7 +964,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
 	np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
 	if (!np) {
 		dev_err(&op->dev, "could not find DMA node\n");
-		goto nodev;
+		goto err_iounmap;
 	}
 
 	/* Setup the DMA register accesses, could be DCR or memory mapped */
@@ -978,7 +978,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
 			dev_dbg(&op->dev, "MEM base: %p\n", lp->sdma_regs);
 		} else {
 			dev_err(&op->dev, "unable to map DMA registers\n");
-			goto nodev;
+			goto err_iounmap;
 		}
 	}
 
@@ -987,7 +987,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
 	if ((lp->rx_irq == NO_IRQ) || (lp->tx_irq == NO_IRQ)) {
 		dev_err(&op->dev, "could not determine irqs\n");
 		rc = -ENOMEM;
-		goto nodev;
+		goto err_iounmap_2;
 	}
 
 	of_node_put(np); /* Finished with the DMA node; drop the reference */
@@ -997,7 +997,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
 	if ((!addr) || (size != 6)) {
 		dev_err(&op->dev, "could not find MAC address\n");
 		rc = -ENODEV;
-		goto nodev;
+		goto err_iounmap_2;
 	}
 	temac_set_mac_address(ndev, (void *)addr);
 
@@ -1013,7 +1013,7 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
 	rc = sysfs_create_group(&lp->dev->kobj, &temac_attr_group);
 	if (rc) {
 		dev_err(lp->dev, "Error creating sysfs files\n");
-		goto nodev;
+		goto err_iounmap_2;
 	}
 
 	rc = register_netdev(lp->ndev);
@@ -1026,6 +1026,11 @@ temac_of_probe(struct of_device *op, const struct of_device_id *match)
 
  err_register_ndev:
 	sysfs_remove_group(&lp->dev->kobj, &temac_attr_group);
+ err_iounmap_2:
+	if (lp->sdma_regs)
+		iounmap(lp->sdma_regs);
+ err_iounmap:
+	iounmap(lp->regs);
  nodev:
 	free_netdev(ndev);
 	ndev = NULL;
@@ -1044,6 +1049,9 @@ static int __devexit temac_of_remove(struct of_device *op)
 	of_node_put(lp->phy_node);
 	lp->phy_node = NULL;
 	dev_set_drvdata(&op->dev, NULL);
+	iounmap(lp->regs);
+	if (lp->sdma_regs)
+		iounmap(lp->sdma_regs);
 	free_netdev(ndev);
 	return 0;
 }
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index e345ec8cb473..73bb8ea6f54a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -289,6 +289,7 @@ struct mv643xx_eth_shared_private {
 	unsigned int t_clk;
 	int extended_rx_coal_limit;
 	int tx_bw_control;
+	int tx_csum_limit;
 };
 
 #define TX_BW_CONTROL_ABSENT	0
@@ -776,13 +777,16 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb)
 	l4i_chk = 0;
 
 	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		int hdr_len;
 		int tag_bytes;
 
 		BUG_ON(skb->protocol != htons(ETH_P_IP) &&
 		       skb->protocol != htons(ETH_P_8021Q));
 
-		tag_bytes = (void *)ip_hdr(skb) - (void *)skb->data - ETH_HLEN;
-		if (unlikely(tag_bytes & ~12)) {
+		hdr_len = (void *)ip_hdr(skb) - (void *)skb->data;
+		tag_bytes = hdr_len - ETH_HLEN;
+		if (skb->len - hdr_len > mp->shared->tx_csum_limit ||
+		    unlikely(tag_bytes & ~12)) {
 			if (skb_checksum_help(skb) == 0)
 				goto no_csum;
 			kfree_skb(skb);
@@ -2666,6 +2670,7 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
 	 * Detect hardware parameters.
 	 */
 	msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
+	msp->tx_csum_limit = pd->tx_csum_limit ? pd->tx_csum_limit : 9 * 1024;
 	infer_hw_params(msp);
 
 	platform_set_drvdata(pdev, msp);
diff --git a/drivers/net/ne.c b/drivers/net/ne.c
index b8e2923a1d69..1063093b3afc 100644
--- a/drivers/net/ne.c
+++ b/drivers/net/ne.c
@@ -806,8 +806,10 @@ static int __init ne_drv_probe(struct platform_device *pdev)
 		dev->base_addr = res->start;
 		dev->irq = platform_get_irq(pdev, 0);
 	} else {
-		if (this_dev < 0 || this_dev >= MAX_NE_CARDS)
+		if (this_dev < 0 || this_dev >= MAX_NE_CARDS) {
+			free_netdev(dev);
 			return -EINVAL;
+		}
 		dev->base_addr = io[this_dev];
 		dev->irq = irq[this_dev];
 		dev->mem_end = bad[this_dev];
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index fa4b24c49f42..d10bcefc0e45 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -4611,8 +4611,7 @@ static void ql_timer(unsigned long data)
 		return;
 	}
 
-	qdev->timer.expires = jiffies + (5*HZ);
-	add_timer(&qdev->timer);
+	mod_timer(&qdev->timer, jiffies + (5*HZ));
 }
 
 static int __devinit qlge_probe(struct pci_dev *pdev,
@@ -4713,6 +4712,8 @@ static void ql_eeh_close(struct net_device *ndev)
 		netif_stop_queue(ndev);
 	}
 
+	/* Disabling the timer */
+	del_timer_sync(&qdev->timer);
 	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
 		cancel_delayed_work_sync(&qdev->asic_reset_work);
 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
@@ -4808,8 +4809,7 @@ static void qlge_io_resume(struct pci_dev *pdev)
 		netif_err(qdev, ifup, qdev->ndev,
 			  "Device was not running prior to EEH.\n");
 	}
-	qdev->timer.expires = jiffies + (5*HZ);
-	add_timer(&qdev->timer);
+	mod_timer(&qdev->timer, jiffies + (5*HZ));
 	netif_device_attach(ndev);
 }
 
@@ -4871,8 +4871,7 @@ static int qlge_resume(struct pci_dev *pdev)
 		return err;
 	}
 
-	qdev->timer.expires = jiffies + (5*HZ);
-	add_timer(&qdev->timer);
+	mod_timer(&qdev->timer, jiffies + (5*HZ));
 	netif_device_attach(ndev);
 
 	return 0;
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 668327ccd8d0..1d37f0c310ca 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -3130,7 +3130,6 @@ static void tx_intr_handler(struct fifo_info *fifo_data)
 			pkt_cnt++;
 
 			/* Updating the statistics block */
-			nic->dev->stats.tx_bytes += skb->len;
 			swstats->mem_freed += skb->truesize;
 			dev_kfree_skb_irq(skb);
 
@@ -4901,48 +4900,81 @@ static void s2io_updt_stats(struct s2io_nic *sp)
  * Return value:
  * pointer to the updated net_device_stats structure.
  */
-
 static struct net_device_stats *s2io_get_stats(struct net_device *dev)
 {
 	struct s2io_nic *sp = netdev_priv(dev);
-	struct config_param *config = &sp->config;
 	struct mac_info *mac_control = &sp->mac_control;
 	struct stat_block *stats = mac_control->stats_info;
-	int i;
+	u64 delta;
 
 	/* Configure Stats for immediate updt */
 	s2io_updt_stats(sp);
 
-	/* Using sp->stats as a staging area, because reset (due to mtu
-	   change, for example) will clear some hardware counters */
-	dev->stats.tx_packets += le32_to_cpu(stats->tmac_frms) -
-		sp->stats.tx_packets;
-	sp->stats.tx_packets = le32_to_cpu(stats->tmac_frms);
-
-	dev->stats.tx_errors += le32_to_cpu(stats->tmac_any_err_frms) -
-		sp->stats.tx_errors;
-	sp->stats.tx_errors = le32_to_cpu(stats->tmac_any_err_frms);
-
-	dev->stats.rx_errors += le64_to_cpu(stats->rmac_drop_frms) -
-		sp->stats.rx_errors;
-	sp->stats.rx_errors = le64_to_cpu(stats->rmac_drop_frms);
-
-	dev->stats.multicast = le32_to_cpu(stats->rmac_vld_mcst_frms) -
-		sp->stats.multicast;
-	sp->stats.multicast = le32_to_cpu(stats->rmac_vld_mcst_frms);
-
-	dev->stats.rx_length_errors = le64_to_cpu(stats->rmac_long_frms) -
-		sp->stats.rx_length_errors;
-	sp->stats.rx_length_errors = le64_to_cpu(stats->rmac_long_frms);
+	/* A device reset will cause the on-adapter statistics to be zero'ed.
+	 * This can be done while running by changing the MTU. To prevent the
+	 * system from having the stats zero'ed, the driver keeps a copy of the
+	 * last update to the system (which is also zero'ed on reset). This
+	 * enables the driver to accurately know the delta between the last
+	 * update and the current update.
+	 */
+	delta = ((u64) le32_to_cpu(stats->rmac_vld_frms_oflow) << 32 |
+		le32_to_cpu(stats->rmac_vld_frms)) - sp->stats.rx_packets;
+	sp->stats.rx_packets += delta;
+	dev->stats.rx_packets += delta;
+
+	delta = ((u64) le32_to_cpu(stats->tmac_frms_oflow) << 32 |
+		le32_to_cpu(stats->tmac_frms)) - sp->stats.tx_packets;
+	sp->stats.tx_packets += delta;
+	dev->stats.tx_packets += delta;
+
+	delta = ((u64) le32_to_cpu(stats->rmac_data_octets_oflow) << 32 |
+		le32_to_cpu(stats->rmac_data_octets)) - sp->stats.rx_bytes;
+	sp->stats.rx_bytes += delta;
+	dev->stats.rx_bytes += delta;
+
+	delta = ((u64) le32_to_cpu(stats->tmac_data_octets_oflow) << 32 |
+		le32_to_cpu(stats->tmac_data_octets)) - sp->stats.tx_bytes;
+	sp->stats.tx_bytes += delta;
+	dev->stats.tx_bytes += delta;
+
+	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_errors;
+	sp->stats.rx_errors += delta;
+	dev->stats.rx_errors += delta;
+
+	delta = ((u64) le32_to_cpu(stats->tmac_any_err_frms_oflow) << 32 |
+		le32_to_cpu(stats->tmac_any_err_frms)) - sp->stats.tx_errors;
+	sp->stats.tx_errors += delta;
+	dev->stats.tx_errors += delta;
+
+	delta = le64_to_cpu(stats->rmac_drop_frms) - sp->stats.rx_dropped;
+	sp->stats.rx_dropped += delta;
+	dev->stats.rx_dropped += delta;
+
+	delta = le64_to_cpu(stats->tmac_drop_frms) - sp->stats.tx_dropped;
+	sp->stats.tx_dropped += delta;
+	dev->stats.tx_dropped += delta;
+
+	/* The adapter MAC interprets pause frames as multicast packets, but
+	 * does not pass them up. This erroneously increases the multicast
+	 * packet count and needs to be deducted when the multicast frame count
+	 * is queried.
+	 */
+	delta = (u64) le32_to_cpu(stats->rmac_vld_mcst_frms_oflow) << 32 |
+		le32_to_cpu(stats->rmac_vld_mcst_frms);
+	delta -= le64_to_cpu(stats->rmac_pause_ctrl_frms);
+	delta -= sp->stats.multicast;
+	sp->stats.multicast += delta;
+	dev->stats.multicast += delta;
 
-	/* collect per-ring rx_packets and rx_bytes */
-	dev->stats.rx_packets = dev->stats.rx_bytes = 0;
-	for (i = 0; i < config->rx_ring_num; i++) {
-		struct ring_info *ring = &mac_control->rings[i];
+	delta = ((u64) le32_to_cpu(stats->rmac_usized_frms_oflow) << 32 |
+		le32_to_cpu(stats->rmac_usized_frms)) +
+		le64_to_cpu(stats->rmac_long_frms) - sp->stats.rx_length_errors;
+	sp->stats.rx_length_errors += delta;
+	dev->stats.rx_length_errors += delta;
 
-		dev->stats.rx_packets += ring->rx_packets;
-		dev->stats.rx_bytes += ring->rx_bytes;
-	}
+	delta = le64_to_cpu(stats->rmac_fcs_err_frms) - sp->stats.rx_crc_errors;
+	sp->stats.rx_crc_errors += delta;
+	dev->stats.rx_crc_errors += delta;
 
 	return &dev->stats;
 }
@@ -7455,15 +7487,11 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
 		}
 	}
 
-	/* Updating statistics */
-	ring_data->rx_packets++;
 	rxdp->Host_Control = 0;
 	if (sp->rxd_mode == RXD_MODE_1) {
 		int len = RXD_GET_BUFFER0_SIZE_1(rxdp->Control_2);
 
-		ring_data->rx_bytes += len;
 		skb_put(skb, len);
-
 	} else if (sp->rxd_mode == RXD_MODE_3B) {
 		int get_block = ring_data->rx_curr_get_info.block_index;
 		int get_off = ring_data->rx_curr_get_info.offset;
@@ -7472,7 +7500,6 @@ static int rx_osm_handler(struct ring_info *ring_data, struct RxD_t * rxdp)
 		unsigned char *buff = skb_push(skb, buf0_len);
 
 		struct buffAdd *ba = &ring_data->ba[get_block][get_off];
-		ring_data->rx_bytes += buf0_len + buf2_len;
 		memcpy(buff, ba->ba_0, buf0_len);
 		skb_put(skb, buf2_len);
 	}
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 47c36e0994f5..5e52c75892df 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -745,10 +745,6 @@ struct ring_info {
 
 	/* Buffer Address store. */
 	struct buffAdd **ba;
-
-	/* per-Ring statistics */
-	unsigned long rx_packets;
-	unsigned long rx_bytes;
 } ____cacheline_aligned;
 
 /* Fifo specific structure */
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 1f3acc3a5dfd..79eee3062083 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -2671,6 +2671,7 @@ static struct platform_driver sbmac_driver = {
 	.remove = __exit_p(sbmac_remove),
 	.driver = {
 		.name = sbmac_string,
+		.owner = THIS_MODULE,
 	},
 };
 
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index 28d3ee175e7b..dd8a4adf48ca 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -104,10 +104,8 @@ static void rndis_msg_indicate(struct usbnet *dev, struct rndis_indicate *msg,
 int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
 {
 	struct cdc_state	*info = (void *) &dev->data;
-	struct usb_cdc_notification notification;
 	int			master_ifnum;
 	int			retval;
-	int			partial;
 	unsigned		count;
 	__le32			rsp;
 	u32			xid = 0, msg_len, request_id;
@@ -135,17 +133,13 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen)
 	if (unlikely(retval < 0 || xid == 0))
 		return retval;
 
-	/* Some devices don't respond on the control channel until
-	 * polled on the status channel, so do that first. */
-	retval = usb_interrupt_msg(
-		dev->udev,
-		usb_rcvintpipe(dev->udev, dev->status->desc.bEndpointAddress),
-		&notification, sizeof(notification), &partial,
-		RNDIS_CONTROL_TIMEOUT_MS);
-	if (unlikely(retval < 0))
-		return retval;
+	// FIXME Seems like some devices discard responses when
+	// we time out and cancel our "get response" requests...
+	// so, this is fragile. Probably need to poll for status.
 
-	/* Poll the control channel; the request probably completed immediately */
+	/* ignore status endpoint, just poll the control channel;
+	 * the request probably completed immediately
+	 */
 	rsp = buf->msg_type | RNDIS_MSG_COMPLETION;
 	for (count = 0; count < 10; count++) {
 		memset(buf, 0, CONTROL_BUFFER_SIZE);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index a95c73de5824..81c76ada8e56 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1293,6 +1293,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 		goto out;
 	}
 
+	/* netdev_printk() needs this so do it as early as possible */
+	SET_NETDEV_DEV(net, &udev->dev);
+
 	dev = netdev_priv(net);
 	dev->udev = xdev;
 	dev->intf = udev;
@@ -1377,8 +1380,6 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
 		dev->rx_urb_size = dev->hard_mtu;
 	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
 
-	SET_NETDEV_DEV(net, &udev->dev);
-
 	if ((dev->driver_info->flags & FLAG_WLAN) != 0)
 		SET_NETDEV_DEVTYPE(net, &wlan_type);
 	if ((dev->driver_info->flags & FLAG_WWAN) != 0)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 1edb7a61983c..bb6b67f6b0cc 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -415,7 +415,7 @@ static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
 static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 {
 	int err;
-	bool oom = false;
+	bool oom;
 
 	do {
 		if (vi->mergeable_rx_bufs)
@@ -425,10 +425,9 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
 		else
 			err = add_recvbuf_small(vi, gfp);
 
-		if (err < 0) {
-			oom = true;
+		oom = err == -ENOMEM;
+		if (err < 0)
 			break;
-		}
 		++vi->num;
 	} while (err > 0);
 	if (unlikely(vi->num > vi->max))
@@ -563,7 +562,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	struct virtnet_info *vi = netdev_priv(dev);
 	int capacity;
 
-again:
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(vi);
 
@@ -572,14 +570,20 @@ again:
 
 	/* This can happen with OOM and indirect buffers. */
 	if (unlikely(capacity < 0)) {
-		netif_stop_queue(dev);
-		dev_warn(&dev->dev, "Unexpected full queue\n");
-		if (unlikely(!virtqueue_enable_cb(vi->svq))) {
-			virtqueue_disable_cb(vi->svq);
-			netif_start_queue(dev);
-			goto again;
+		if (net_ratelimit()) {
+			if (likely(capacity == -ENOMEM)) {
+				dev_warn(&dev->dev,
+					 "TX queue failure: out of memory\n");
+			} else {
+				dev->stats.tx_fifo_errors++;
+				dev_warn(&dev->dev,
+					 "Unexpected TX queue failure: %d\n",
+					 capacity);
+			}
 		}
-		return NETDEV_TX_BUSY;
+		dev->stats.tx_dropped++;
+		kfree_skb(skb);
+		return NETDEV_TX_OK;
 	}
 	virtqueue_kick(vi->svq);
 
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index d14e207de1df..fc8b2d7a0919 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -4517,9 +4517,9 @@ vxge_starter(void)
 	char version[32];
 	snprintf(version, 32, "%s", DRV_VERSION);
 
-	printk(KERN_CRIT "%s: Copyright(c) 2002-2009 Neterion Inc\n",
+	printk(KERN_INFO "%s: Copyright(c) 2002-2009 Neterion Inc\n",
 		VXGE_DRIVER_NAME);
-	printk(KERN_CRIT "%s: Driver version: %s\n",
+	printk(KERN_INFO "%s: Driver version: %s\n",
 		VXGE_DRIVER_NAME, version);
 
 	verify_bandwidth();
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index fbb7dec6ddeb..5ea87736a6ae 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -445,6 +445,7 @@ void ath_deinit_leds(struct ath_softc *sc);
 #define SC_OP_TSF_RESET              BIT(11)
 #define SC_OP_BT_PRIORITY_DETECTED   BIT(12)
 #define SC_OP_BT_SCAN                BIT(13)
+#define SC_OP_ANI_RUN                BIT(14)
 
 /* Powersave flags */
 #define PS_WAIT_FOR_BEACON           BIT(0)
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index abfa0493236f..1e2a68ea9355 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -336,6 +336,10 @@ set_timer:
 static void ath_start_ani(struct ath_common *common)
 {
 	unsigned long timestamp = jiffies_to_msecs(jiffies);
+	struct ath_softc *sc = (struct ath_softc *) common->priv;
+
+	if (!(sc->sc_flags & SC_OP_ANI_RUN))
+		return;
 
 	common->ani.longcal_timer = timestamp;
 	common->ani.shortcal_timer = timestamp;
@@ -872,11 +876,13 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
 		/* Reset rssi stats */
 		sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
 
+		sc->sc_flags |= SC_OP_ANI_RUN;
 		ath_start_ani(common);
 	} else {
 		ath_print(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
 		common->curaid = 0;
 		/* Stop ANI */
+		sc->sc_flags &= ~SC_OP_ANI_RUN;
 		del_timer_sync(&common->ani.timer);
 	}
 }
@@ -1478,8 +1484,10 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
 
 	if (vif->type == NL80211_IFTYPE_AP ||
 	    vif->type == NL80211_IFTYPE_ADHOC ||
-	    vif->type == NL80211_IFTYPE_MONITOR)
+	    vif->type == NL80211_IFTYPE_MONITOR) {
+		sc->sc_flags |= SC_OP_ANI_RUN;
 		ath_start_ani(common);
+	}
 
 out:
 	mutex_unlock(&sc->mutex);
@@ -1500,6 +1508,7 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
 	mutex_lock(&sc->mutex);
 
 	/* Stop ANI */
+	sc->sc_flags &= ~SC_OP_ANI_RUN;
 	del_timer_sync(&common->ani.timer);
 
 	/* Reclaim beacon resources */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index 44ef5d93befc..01658cf82d39 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -212,11 +212,7 @@ static void iwlagn_chain_noise_reset(struct iwl_priv *priv)
 static void iwlagn_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
 				   __le32 *tx_flags)
 {
-	if ((info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) ||
-	    (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT))
-		*tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
-	else
-		*tx_flags &= ~TX_CMD_FLG_RTS_CTS_MSK;
+	*tx_flags |= TX_CMD_FLG_RTS_CTS_MSK;
 }
 
 /* Calc max signal level (dBm) among 3 possible receivers */
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 426e95567de3..5bbc5298ef96 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -1314,7 +1314,6 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
 			changed_flags, *total_flags);
 
 	CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
-	CHK(FIF_ALLMULTI, RXON_FILTER_ACCEPT_GRP_MSK);
 	CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK);
 	CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
 
@@ -1329,6 +1328,12 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
 
 	mutex_unlock(&priv->mutex);
 
+	/*
+	 * Receiving all multicast frames is always enabled by the
+	 * default flags setup in iwl_connection_init_rx_config()
+	 * since we currently do not support programming multicast
+	 * filters into the device.
+	 */
 	*total_flags &= FIF_OTHER_BSS | FIF_ALLMULTI | FIF_PROMISC_IN_BSS |
 			FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
 }
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index df5b6b971f26..57a593c58cf4 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -98,7 +98,8 @@ static void tx_poll_start(struct vhost_net *net, struct socket *sock)
 static void handle_tx(struct vhost_net *net)
 {
 	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_TX];
-	unsigned head, out, in, s;
+	unsigned out, in, s;
+	int head;
 	struct msghdr msg = {
 		.msg_name = NULL,
 		.msg_namelen = 0,
@@ -135,6 +136,9 @@ static void handle_tx(struct vhost_net *net)
 					 ARRAY_SIZE(vq->iov),
 					 &out, &in,
 					 NULL, NULL);
+		/* On error, stop handling until the next kick. */
+		if (unlikely(head < 0))
+			break;
 		/* Nothing new? Wait for eventfd to tell us they refilled. */
 		if (head == vq->num) {
 			wmem = atomic_read(&sock->sk->sk_wmem_alloc);
@@ -192,7 +196,8 @@ static void handle_tx(struct vhost_net *net)
 static void handle_rx(struct vhost_net *net)
 {
 	struct vhost_virtqueue *vq = &net->dev.vqs[VHOST_NET_VQ_RX];
-	unsigned head, out, in, log, s;
+	unsigned out, in, log, s;
+	int head;
 	struct vhost_log *vq_log;
 	struct msghdr msg = {
 		.msg_name = NULL,
@@ -228,6 +233,9 @@ static void handle_rx(struct vhost_net *net)
 					 ARRAY_SIZE(vq->iov),
 					 &out, &in,
 					 vq_log, &log);
+		/* On error, stop handling until the next kick. */
+		if (unlikely(head < 0))
+			break;
 		/* OK, now we need to know about added descriptors. */
 		if (head == vq->num) {
 			if (unlikely(vhost_enable_notify(vq))) {
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 3b83382e06eb..0b99783083f6 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -736,12 +736,12 @@ static int translate_desc(struct vhost_dev *dev, u64 addr, u32 len,
 	mem = rcu_dereference(dev->memory);
 	while ((u64)len > s) {
 		u64 size;
-		if (ret >= iov_size) {
+		if (unlikely(ret >= iov_size)) {
 			ret = -ENOBUFS;
 			break;
 		}
 		reg = find_region(mem, addr, len);
-		if (!reg) {
+		if (unlikely(!reg)) {
 			ret = -EFAULT;
 			break;
 		}
@@ -780,18 +780,18 @@ static unsigned next_desc(struct vring_desc *desc)
 	return next;
 }
 
-static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+static int get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 			struct iovec iov[], unsigned int iov_size,
 			unsigned int *out_num, unsigned int *in_num,
 			struct vhost_log *log, unsigned int *log_num,
 			struct vring_desc *indirect)
 {
 	struct vring_desc desc;
 	unsigned int i = 0, count, found = 0;
 	int ret;
 
 	/* Sanity check */
-	if (indirect->len % sizeof desc) {
+	if (unlikely(indirect->len % sizeof desc)) {
 		vq_err(vq, "Invalid length in indirect descriptor: "
 		       "len 0x%llx not multiple of 0x%zx\n",
 		       (unsigned long long)indirect->len,
@@ -801,7 +801,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 
 	ret = translate_desc(dev, indirect->addr, indirect->len, vq->indirect,
 			     ARRAY_SIZE(vq->indirect));
-	if (ret < 0) {
+	if (unlikely(ret < 0)) {
 		vq_err(vq, "Translation failure %d in indirect.\n", ret);
 		return ret;
 	}
@@ -813,7 +813,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 	count = indirect->len / sizeof desc;
 	/* Buffers are chained via a 16 bit next field, so
 	 * we can have at most 2^16 of these. */
-	if (count > USHRT_MAX + 1) {
+	if (unlikely(count > USHRT_MAX + 1)) {
 		vq_err(vq, "Indirect buffer length too big: %d\n",
 		       indirect->len);
 		return -E2BIG;
@@ -821,19 +821,19 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 
 	do {
 		unsigned iov_count = *in_num + *out_num;
-		if (++found > count) {
+		if (unlikely(++found > count)) {
 			vq_err(vq, "Loop detected: last one at %u "
 			       "indirect size %u\n",
 			       i, count);
 			return -EINVAL;
 		}
-		if (memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
-				     sizeof desc)) {
+		if (unlikely(memcpy_fromiovec((unsigned char *)&desc, vq->indirect,
+					      sizeof desc))) {
 			vq_err(vq, "Failed indirect descriptor: idx %d, %zx\n",
 			       i, (size_t)indirect->addr + i * sizeof desc);
 			return -EINVAL;
 		}
-		if (desc.flags & VRING_DESC_F_INDIRECT) {
+		if (unlikely(desc.flags & VRING_DESC_F_INDIRECT)) {
 			vq_err(vq, "Nested indirect descriptor: idx %d, %zx\n",
 			       i, (size_t)indirect->addr + i * sizeof desc);
 			return -EINVAL;
@@ -841,7 +841,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 
 		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
 				     iov_size - iov_count);
-		if (ret < 0) {
+		if (unlikely(ret < 0)) {
 			vq_err(vq, "Translation failure %d indirect idx %d\n",
 			       ret, i);
 			return ret;
@@ -857,7 +857,7 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 		} else {
 			/* If it's an output descriptor, they're all supposed
 			 * to come before any input descriptors. */
-			if (*in_num) {
+			if (unlikely(*in_num)) {
 				vq_err(vq, "Indirect descriptor "
 				       "has out after in: idx %d\n", i);
 				return -EINVAL;
@@ -873,12 +873,13 @@ static unsigned get_indirect(struct vhost_dev *dev, struct vhost_virtqueue *vq,
  * number of output then some number of input descriptors, it's actually two
  * iovecs, but we pack them into one and note how many of each there were.
  *
- * This function returns the descriptor number found, or vq->num (which
- * is never a valid descriptor number) if none was found. */
-unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
-			   struct iovec iov[], unsigned int iov_size,
-			   unsigned int *out_num, unsigned int *in_num,
-			   struct vhost_log *log, unsigned int *log_num)
+ * This function returns the descriptor number found, or vq->num (which is
+ * never a valid descriptor number) if none was found. A negative code is
+ * returned on error. */
+int vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
+		      struct iovec iov[], unsigned int iov_size,
+		      unsigned int *out_num, unsigned int *in_num,
+		      struct vhost_log *log, unsigned int *log_num)
 {
 	struct vring_desc desc;
 	unsigned int i, head, found = 0;
@@ -887,16 +888,16 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 
 	/* Check it isn't doing very strange things with descriptor numbers. */
 	last_avail_idx = vq->last_avail_idx;
-	if (get_user(vq->avail_idx, &vq->avail->idx)) {
+	if (unlikely(get_user(vq->avail_idx, &vq->avail->idx))) {
 		vq_err(vq, "Failed to access avail idx at %p\n",
 		       &vq->avail->idx);
-		return vq->num;
+		return -EFAULT;
 	}
 
-	if ((u16)(vq->avail_idx - last_avail_idx) > vq->num) {
+	if (unlikely((u16)(vq->avail_idx - last_avail_idx) > vq->num)) {
 		vq_err(vq, "Guest moved used index from %u to %u",
 		       last_avail_idx, vq->avail_idx);
-		return vq->num;
+		return -EFAULT;
 	}
 
 	/* If there's nothing new since last we looked, return invalid. */
@@ -908,18 +909,19 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 
 	/* Grab the next descriptor number they're advertising, and increment
 	 * the index we've seen. */
-	if (get_user(head, &vq->avail->ring[last_avail_idx % vq->num])) {
+	if (unlikely(get_user(head,
+			      &vq->avail->ring[last_avail_idx % vq->num]))) {
 		vq_err(vq, "Failed to read head: idx %d address %p\n",
 		       last_avail_idx,
 		       &vq->avail->ring[last_avail_idx % vq->num]);
-		return vq->num;
+		return -EFAULT;
 	}
 
 	/* If their number is silly, that's an error. */
-	if (head >= vq->num) {
+	if (unlikely(head >= vq->num)) {
 		vq_err(vq, "Guest says index %u > %u is available",
 		       head, vq->num);
-		return vq->num;
+		return -EINVAL;
 	}
 
 	/* When we start there are none of either input nor output. */
@@ -930,41 +932,41 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 	i = head;
 	do {
 		unsigned iov_count = *in_num + *out_num;
-		if (i >= vq->num) {
+		if (unlikely(i >= vq->num)) {
 			vq_err(vq, "Desc index is %u > %u, head = %u",
 			       i, vq->num, head);
-			return vq->num;
+			return -EINVAL;
 		}
-		if (++found > vq->num) {
+		if (unlikely(++found > vq->num)) {
 			vq_err(vq, "Loop detected: last one at %u "
 			       "vq size %u head %u\n",
 			       i, vq->num, head);
-			return vq->num;
+			return -EINVAL;
 		}
 		ret = copy_from_user(&desc, vq->desc + i, sizeof desc);
-		if (ret) {
+		if (unlikely(ret)) {
 			vq_err(vq, "Failed to get descriptor: idx %d addr %p\n",
 			       i, vq->desc + i);
-			return vq->num;
+			return -EFAULT;
 		}
 		if (desc.flags & VRING_DESC_F_INDIRECT) {
 			ret = get_indirect(dev, vq, iov, iov_size,
 					   out_num, in_num,
 					   log, log_num, &desc);
-			if (ret < 0) {
+			if (unlikely(ret < 0)) {
 				vq_err(vq, "Failure detected "
 				       "in indirect descriptor at idx %d\n", i);
-				return vq->num;
+				return ret;
 			}
 			continue;
 		}
 
 		ret = translate_desc(dev, desc.addr, desc.len, iov + iov_count,
 				     iov_size - iov_count);
-		if (ret < 0) {
+		if (unlikely(ret < 0)) {
 			vq_err(vq, "Translation failure %d descriptor idx %d\n",
 			       ret, i);
-			return vq->num;
+			return ret;
 		}
 		if (desc.flags & VRING_DESC_F_WRITE) {
 			/* If this is an input descriptor,
@@ -978,10 +980,10 @@ unsigned vhost_get_vq_desc(struct vhost_dev *dev, struct vhost_virtqueue *vq,
 		} else {
 			/* If it's an output descriptor, they're all supposed
 			 * to come before any input descriptors. */
-			if (*in_num) {
+			if (unlikely(*in_num)) {
 				vq_err(vq, "Descriptor has out after in: "
 				       "idx %d\n", i);
-				return vq->num;
+				return -EINVAL;
 			}
 			*out_num += ret;
 		}
diff --git a/drivers/vhost/vhost.h b/drivers/vhost/vhost.h
index 44591ba9b07a..11ee13dba0f7 100644
--- a/drivers/vhost/vhost.h
+++ b/drivers/vhost/vhost.h
@@ -120,10 +120,10 @@ long vhost_dev_ioctl(struct vhost_dev *, unsigned int ioctl, unsigned long arg);
 int vhost_vq_access_ok(struct vhost_virtqueue *vq);
 int vhost_log_access_ok(struct vhost_dev *);
 
-unsigned vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
+int vhost_get_vq_desc(struct vhost_dev *, struct vhost_virtqueue *,
 		      struct iovec iov[], unsigned int iov_count,
 		      unsigned int *out_num, unsigned int *in_num,
 		      struct vhost_log *log, unsigned int *log_num);
 void vhost_discard_vq_desc(struct vhost_virtqueue *);
 
 int vhost_add_used(struct vhost_virtqueue *, unsigned int head, int len);
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 276b40a16835..b4207ca3ad52 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -379,6 +379,8 @@ struct ethtool_rxnfc {
 	__u32		flow_type;
 	/* The rx flow hash value or the rule DB size */
 	__u64		data;
+	/* The following fields are not valid and must not be used for
+	 * the ETHTOOL_{G,X}RXFH commands. */
 	struct ethtool_rx_flow_spec fs;
 	__u32		rule_cnt;
 	__u32		rule_locs[0];
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index cbbbe9bfecad..30b0c4e78f91 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -19,6 +19,11 @@ struct mv643xx_eth_shared_platform_data {
 	struct mbus_dram_target_info *dram;
 	struct platform_device *shared_smi;
 	unsigned int t_clk;
+	/*
+	 * Max packet size for Tx IP/Layer 4 checksum, when set to 0, default
+	 * limit of 9KiB will be used.
+	 */
+	int tx_csum_limit;
 };
 
 #define MV643XX_ETH_PHY_ADDR_DEFAULT	0
diff --git a/include/linux/net.h b/include/linux/net.h
index 2b4deeeb8646..dee0b11a8759 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -129,10 +129,9 @@ struct socket_wq {
129 * @type: socket type (%SOCK_STREAM, etc) 129 * @type: socket type (%SOCK_STREAM, etc)
130 * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc) 130 * @flags: socket flags (%SOCK_ASYNC_NOSPACE, etc)
131 * @ops: protocol specific socket operations 131 * @ops: protocol specific socket operations
132 * @fasync_list: Asynchronous wake up list
133 * @file: File back pointer for gc 132 * @file: File back pointer for gc
134 * @sk: internal networking protocol agnostic socket representation 133 * @sk: internal networking protocol agnostic socket representation
135 * @wait: wait queue for several uses 134 * @wq: wait queue for several uses
136 */ 135 */
137struct socket { 136struct socket {
138 socket_state state; 137 socket_state state;
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 40291f375024..b21e4054c12c 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1656,6 +1656,9 @@ static inline int netif_is_multiqueue(const struct net_device *dev)
1656 return (dev->num_tx_queues > 1); 1656 return (dev->num_tx_queues > 1);
1657} 1657}
1658 1658
1659extern void netif_set_real_num_tx_queues(struct net_device *dev,
1660 unsigned int txq);
1661
1659/* Use this variant when it is known for sure that it 1662/* Use this variant when it is known for sure that it
1660 * is executing from hardware interrupt context or with hardware interrupts 1663 * is executing from hardware interrupt context or with hardware interrupts
1661 * disabled. 1664 * disabled.
@@ -2329,7 +2332,7 @@ do { \
2329#endif 2332#endif
2330 2333
2331#if defined(VERBOSE_DEBUG) 2334#if defined(VERBOSE_DEBUG)
2332#define netif_vdbg netdev_dbg 2335#define netif_vdbg netif_dbg
2333#else 2336#else
2334#define netif_vdbg(priv, type, dev, format, args...) \ 2337#define netif_vdbg(priv, type, dev, format, args...) \
2335({ \ 2338({ \
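The netif_vdbg fix matters because call sites pass the message-level argument list, which netif_dbg() accepts and netdev_dbg() does not, so the old alias mismatched every netif_vdbg() call under VERBOSE_DEBUG. A sketch of a typical call site; priv stands for a driver private struct carrying msg_enable, and the message text is illustrative:

	netif_vdbg(priv, tx_queued, netdev,
		   "queued skb %p, len %u\n", skb, skb->len);
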
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 03ca5d826757..433604bb3fe8 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -313,12 +313,24 @@ extern void qdisc_calculate_pkt_len(struct sk_buff *skb,
313extern void tcf_destroy(struct tcf_proto *tp); 313extern void tcf_destroy(struct tcf_proto *tp);
314extern void tcf_destroy_chain(struct tcf_proto **fl); 314extern void tcf_destroy_chain(struct tcf_proto **fl);
315 315
316/* Reset all TX qdiscs of a device. */ 316/* Reset all TX qdiscs greater than index of a device. */
317static inline void qdisc_reset_all_tx_gt(struct net_device *dev, unsigned int i)
318{
319 struct Qdisc *qdisc;
320
321 for (; i < dev->num_tx_queues; i++) {
322 qdisc = netdev_get_tx_queue(dev, i)->qdisc;
323 if (qdisc) {
324 spin_lock_bh(qdisc_lock(qdisc));
325 qdisc_reset(qdisc);
326 spin_unlock_bh(qdisc_lock(qdisc));
327 }
328 }
329}
330
317static inline void qdisc_reset_all_tx(struct net_device *dev) 331static inline void qdisc_reset_all_tx(struct net_device *dev)
318{ 332{
319 unsigned int i; 333 qdisc_reset_all_tx_gt(dev, 0);
320 for (i = 0; i < dev->num_tx_queues; i++)
321 qdisc_reset(netdev_get_tx_queue(dev, i)->qdisc);
322} 334}
323 335
324/* Are all TX queues of the device empty? */ 336/* Are all TX queues of the device empty? */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 1913af67c43d..fc8f36dd0f5c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1586,7 +1586,7 @@ static inline struct xfrm_state *xfrm_input_state(struct sk_buff *skb)
1586static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m) 1586static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
1587{ 1587{
1588 if (attrs[XFRMA_MARK]) 1588 if (attrs[XFRMA_MARK])
1589 memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(m)); 1589 memcpy(m, nla_data(attrs[XFRMA_MARK]), sizeof(struct xfrm_mark));
1590 else 1590 else
1591 m->v = m->m = 0; 1591 m->v = m->m = 0;
1592 1592
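The memcpy fix matters because sizeof(m) measures the pointer, not the structure it points to: on 32-bit kernels only the first four bytes (the mark value) were copied and the mask was left untouched. A stand-alone, user-space illustration of the size difference; the struct merely mirrors xfrm_mark's two __u32 members:

#include <stdio.h>

struct xfrm_mark_like {		/* mirrors struct xfrm_mark: v then m */
	unsigned int v;		/* mark value */
	unsigned int m;		/* mark mask  */
};

int main(void)
{
	struct xfrm_mark_like mark, *m = &mark;

	/* sizeof(m) is the pointer size (4 on 32-bit, 8 on 64-bit);
	 * the structure itself is always 8 bytes. */
	printf("sizeof(m)=%zu sizeof(struct)=%zu\n",
	       sizeof(m), sizeof(struct xfrm_mark_like));
	return 0;
}
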
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 9d21d98ae5fa..27ae946363f1 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -99,6 +99,15 @@ static struct net_bridge_mdb_entry *__br_mdb_ip_get(
99 return NULL; 99 return NULL;
100} 100}
101 101
102static struct net_bridge_mdb_entry *br_mdb_ip_get(
103 struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
104{
105 if (!mdb)
106 return NULL;
107
108 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
109}
110
102static struct net_bridge_mdb_entry *br_mdb_ip4_get( 111static struct net_bridge_mdb_entry *br_mdb_ip4_get(
103 struct net_bridge_mdb_htable *mdb, __be32 dst) 112 struct net_bridge_mdb_htable *mdb, __be32 dst)
104{ 113{
@@ -107,7 +116,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip4_get(
107 br_dst.u.ip4 = dst; 116 br_dst.u.ip4 = dst;
108 br_dst.proto = htons(ETH_P_IP); 117 br_dst.proto = htons(ETH_P_IP);
109 118
110 return __br_mdb_ip_get(mdb, &br_dst, __br_ip4_hash(mdb, dst)); 119 return br_mdb_ip_get(mdb, &br_dst);
111} 120}
112 121
113#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 122#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -119,23 +128,17 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(
119 ipv6_addr_copy(&br_dst.u.ip6, dst); 128 ipv6_addr_copy(&br_dst.u.ip6, dst);
120 br_dst.proto = htons(ETH_P_IPV6); 129 br_dst.proto = htons(ETH_P_IPV6);
121 130
122 return __br_mdb_ip_get(mdb, &br_dst, __br_ip6_hash(mdb, dst)); 131 return br_mdb_ip_get(mdb, &br_dst);
123} 132}
124#endif 133#endif
125 134
126static struct net_bridge_mdb_entry *br_mdb_ip_get(
127 struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
128{
129 return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
130}
131
132struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, 135struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
133 struct sk_buff *skb) 136 struct sk_buff *skb)
134{ 137{
135 struct net_bridge_mdb_htable *mdb = br->mdb; 138 struct net_bridge_mdb_htable *mdb = br->mdb;
136 struct br_ip ip; 139 struct br_ip ip;
137 140
138 if (!mdb || br->multicast_disabled) 141 if (br->multicast_disabled)
139 return NULL; 142 return NULL;
140 143
141 if (BR_INPUT_SKB_CB(skb)->igmp) 144 if (BR_INPUT_SKB_CB(skb)->igmp)
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 44420992f72f..8fb75f89c4aa 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -591,6 +591,9 @@ static unsigned int br_nf_pre_routing(unsigned int hook, struct sk_buff *skb,
591 591
592 pskb_trim_rcsum(skb, len); 592 pskb_trim_rcsum(skb, len);
593 593
594 /* BUG: Should really parse the IP options here. */
595 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
596
594 nf_bridge_put(skb->nf_bridge); 597 nf_bridge_put(skb->nf_bridge);
595 if (!nf_bridge_alloc(skb)) 598 if (!nf_bridge_alloc(skb))
596 return NF_DROP; 599 return NF_DROP;
diff --git a/net/core/dev.c b/net/core/dev.c
index 2b3bf53bc687..723a34710ad4 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1553,6 +1553,24 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1553 rcu_read_unlock(); 1553 rcu_read_unlock();
1554} 1554}
1555 1555
1556/*
1557 * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
1558 * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
1559 */
1560void netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
1561{
1562 unsigned int real_num = dev->real_num_tx_queues;
1563
1564 if (unlikely(txq > dev->num_tx_queues))
1565 ;
1566 else if (txq > real_num)
1567 dev->real_num_tx_queues = txq;
1568 else if (txq < real_num) {
1569 dev->real_num_tx_queues = txq;
1570 qdisc_reset_all_tx_gt(dev, txq);
1571 }
1572}
1573EXPORT_SYMBOL(netif_set_real_num_tx_queues);
1556 1574
1557static inline void __netif_reschedule(struct Qdisc *q) 1575static inline void __netif_reschedule(struct Qdisc *q)
1558{ 1576{
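netif_set_real_num_tx_queues() gives multiqueue drivers one helper for changing the active queue count: raising it only updates real_num_tx_queues, while lowering it also resets the qdiscs of the queues that just became unused so no stale skbs linger there. A minimal sketch of a driver calling it after reprogramming its TX rings; adapter and num_tx_rings are illustrative names:

	netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_rings);
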
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a0f4964033d2..75e4ffeb8cc9 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -318,23 +318,33 @@ out:
318} 318}
319 319
320static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev, 320static noinline_for_stack int ethtool_set_rxnfc(struct net_device *dev,
321 void __user *useraddr) 321 u32 cmd, void __user *useraddr)
322{ 322{
323 struct ethtool_rxnfc cmd; 323 struct ethtool_rxnfc info;
324 size_t info_size = sizeof(info);
324 325
325 if (!dev->ethtool_ops->set_rxnfc) 326 if (!dev->ethtool_ops->set_rxnfc)
326 return -EOPNOTSUPP; 327 return -EOPNOTSUPP;
327 328
328 if (copy_from_user(&cmd, useraddr, sizeof(cmd))) 329 /* struct ethtool_rxnfc was originally defined for
330 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
331 * members. User-space might still be using that
332 * definition. */
333 if (cmd == ETHTOOL_SRXFH)
334 info_size = (offsetof(struct ethtool_rxnfc, data) +
335 sizeof(info.data));
336
337 if (copy_from_user(&info, useraddr, info_size))
329 return -EFAULT; 338 return -EFAULT;
330 339
331 return dev->ethtool_ops->set_rxnfc(dev, &cmd); 340 return dev->ethtool_ops->set_rxnfc(dev, &info);
332} 341}
333 342
334static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev, 343static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
335 void __user *useraddr) 344 u32 cmd, void __user *useraddr)
336{ 345{
337 struct ethtool_rxnfc info; 346 struct ethtool_rxnfc info;
347 size_t info_size = sizeof(info);
338 const struct ethtool_ops *ops = dev->ethtool_ops; 348 const struct ethtool_ops *ops = dev->ethtool_ops;
339 int ret; 349 int ret;
340 void *rule_buf = NULL; 350 void *rule_buf = NULL;
@@ -342,13 +352,22 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
342 if (!ops->get_rxnfc) 352 if (!ops->get_rxnfc)
343 return -EOPNOTSUPP; 353 return -EOPNOTSUPP;
344 354
345 if (copy_from_user(&info, useraddr, sizeof(info))) 355 /* struct ethtool_rxnfc was originally defined for
356 * ETHTOOL_{G,S}RXFH with only the cmd, flow_type and data
357 * members. User-space might still be using that
358 * definition. */
359 if (cmd == ETHTOOL_GRXFH)
360 info_size = (offsetof(struct ethtool_rxnfc, data) +
361 sizeof(info.data));
362
363 if (copy_from_user(&info, useraddr, info_size))
346 return -EFAULT; 364 return -EFAULT;
347 365
348 if (info.cmd == ETHTOOL_GRXCLSRLALL) { 366 if (info.cmd == ETHTOOL_GRXCLSRLALL) {
349 if (info.rule_cnt > 0) { 367 if (info.rule_cnt > 0) {
350 rule_buf = kmalloc(info.rule_cnt * sizeof(u32), 368 if (info.rule_cnt <= KMALLOC_MAX_SIZE / sizeof(u32))
351 GFP_USER); 369 rule_buf = kmalloc(info.rule_cnt * sizeof(u32),
370 GFP_USER);
352 if (!rule_buf) 371 if (!rule_buf)
353 return -ENOMEM; 372 return -ENOMEM;
354 } 373 }
@@ -359,7 +378,7 @@ static noinline_for_stack int ethtool_get_rxnfc(struct net_device *dev,
359 goto err_out; 378 goto err_out;
360 379
361 ret = -EFAULT; 380 ret = -EFAULT;
362 if (copy_to_user(useraddr, &info, sizeof(info))) 381 if (copy_to_user(useraddr, &info, info_size))
363 goto err_out; 382 goto err_out;
364 383
365 if (rule_buf) { 384 if (rule_buf) {
@@ -1516,12 +1535,12 @@ int dev_ethtool(struct net *net, struct ifreq *ifr)
1516 case ETHTOOL_GRXCLSRLCNT: 1535 case ETHTOOL_GRXCLSRLCNT:
1517 case ETHTOOL_GRXCLSRULE: 1536 case ETHTOOL_GRXCLSRULE:
1518 case ETHTOOL_GRXCLSRLALL: 1537 case ETHTOOL_GRXCLSRLALL:
1519 rc = ethtool_get_rxnfc(dev, useraddr); 1538 rc = ethtool_get_rxnfc(dev, ethcmd, useraddr);
1520 break; 1539 break;
1521 case ETHTOOL_SRXFH: 1540 case ETHTOOL_SRXFH:
1522 case ETHTOOL_SRXCLSRLDEL: 1541 case ETHTOOL_SRXCLSRLDEL:
1523 case ETHTOOL_SRXCLSRLINS: 1542 case ETHTOOL_SRXCLSRLINS:
1524 rc = ethtool_set_rxnfc(dev, useraddr); 1543 rc = ethtool_set_rxnfc(dev, ethcmd, useraddr);
1525 break; 1544 break;
1526 case ETHTOOL_GGRO: 1545 case ETHTOOL_GGRO:
1527 rc = ethtool_get_gro(dev, useraddr); 1546 rc = ethtool_get_gro(dev, useraddr);
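The new bound on info.rule_cnt keeps a user-controlled 32-bit count from turning into an undersized allocation: on 32-bit the product rule_cnt * sizeof(u32) can wrap, and on 64-bit an enormous request would be handed to kmalloc(). With the check in place an oversized count simply leaves rule_buf NULL and the existing -ENOMEM path triggers. A sketch of the guard with the failure mode spelled out; count stands in for info.rule_cnt:

	if (count > KMALLOC_MAX_SIZE / sizeof(u32))
		return -ENOMEM;	/* e.g. count = 0x40000001: on 32-bit the
				 * byte count wraps to 4, and the driver
				 * would then overrun the tiny buffer while
				 * storing 'count' rule locations */
	rule_buf = kmalloc(count * sizeof(u32), GFP_USER);
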
diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
index 1705476670ef..23883a48ebfb 100644
--- a/net/ipv4/xfrm4_policy.c
+++ b/net/ipv4/xfrm4_policy.c
@@ -108,6 +108,8 @@ _decode_session4(struct sk_buff *skb, struct flowi *fl, int reverse)
108 u8 *xprth = skb_network_header(skb) + iph->ihl * 4; 108 u8 *xprth = skb_network_header(skb) + iph->ihl * 4;
109 109
110 memset(fl, 0, sizeof(struct flowi)); 110 memset(fl, 0, sizeof(struct flowi));
111 fl->mark = skb->mark;
112
111 if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) { 113 if (!(iph->frag_off & htons(IP_MF | IP_OFFSET))) {
112 switch (iph->protocol) { 114 switch (iph->protocol) {
113 case IPPROTO_UDP: 115 case IPPROTO_UDP:
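Copying skb->mark into the flow key lets IPsec policy and state lookups honour the packet mark (the IPv6 decode path below gets the same change), so selectors that carry an xfrm_mark can now distinguish traffic marked by netfilter or SO_MARK. An illustrative value/mask test in the spirit of that selector, not a quote of the kernel helper:

	static inline bool mark_matches(u32 v, u32 m, u32 fl_mark)
	{
		/* match when every bit covered by the mask agrees */
		return (fl_mark & m) == (v & m);
	}
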
diff --git a/net/ipv6/netfilter/ip6t_REJECT.c b/net/ipv6/netfilter/ip6t_REJECT.c
index 47d227713758..2933396e0281 100644
--- a/net/ipv6/netfilter/ip6t_REJECT.c
+++ b/net/ipv6/netfilter/ip6t_REJECT.c
@@ -97,9 +97,11 @@ static void send_reset(struct net *net, struct sk_buff *oldskb)
97 fl.fl_ip_dport = otcph.source; 97 fl.fl_ip_dport = otcph.source;
98 security_skb_classify_flow(oldskb, &fl); 98 security_skb_classify_flow(oldskb, &fl);
99 dst = ip6_route_output(net, NULL, &fl); 99 dst = ip6_route_output(net, NULL, &fl);
100 if (dst == NULL) 100 if (dst == NULL || dst->error) {
101 dst_release(dst);
101 return; 102 return;
102 if (dst->error || xfrm_lookup(net, &dst, &fl, NULL, 0)) 103 }
104 if (xfrm_lookup(net, &dst, &fl, NULL, 0))
103 return; 105 return;
104 106
105 hh_len = (dst->dev->hard_header_len + 15)&~15; 107 hh_len = (dst->dev->hard_header_len + 15)&~15;
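The reordered checks reflect that ip6_route_output() signals failure through dst->error on an entry that still holds a reference, so returning without dst_release() would leak it; the xfrm_lookup() test keeps its previous behaviour and is merely split out. A compact restatement of the pattern, assuming (as the kernel does) that dst_release() accepts NULL:

	dst = ip6_route_output(net, NULL, &fl);
	if (dst == NULL || dst->error) {
		dst_release(dst);	/* even an error route is refcounted */
		return;
	}
	if (xfrm_lookup(net, &dst, &fl, NULL, 0))
		return;
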
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 4a0e77e14468..6baeabbbca82 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -124,6 +124,8 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
124 u8 nexthdr = nh[IP6CB(skb)->nhoff]; 124 u8 nexthdr = nh[IP6CB(skb)->nhoff];
125 125
126 memset(fl, 0, sizeof(struct flowi)); 126 memset(fl, 0, sizeof(struct flowi));
127 fl->mark = skb->mark;
128
127 ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr); 129 ipv6_addr_copy(&fl->fl6_dst, reverse ? &hdr->saddr : &hdr->daddr);
128 ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr); 130 ipv6_addr_copy(&fl->fl6_src, reverse ? &hdr->daddr : &hdr->saddr);
129 131