author    Linus Torvalds <torvalds@linux-foundation.org>  2011-01-11 19:32:41 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-11 19:32:41 -0500
commit    4162cf64973df51fc885825bc9ca4d055891c49f (patch)
tree      f218c7bd298f4d41be94d08a314eb9fbc3fcb4ea /net
parent    fb7b5a956992fdc53d0b9c8ea29b51b92839dc1b (diff)
parent    343a8d13aae58dec562dbb5c7d48a53e9b847871 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net-2.6: (67 commits)
  cxgb4vf: recover from failure in cxgb4vf_open()
  netfilter: ebtables: make broute table work again
  netfilter: fix race in conntrack between dump_table and destroy
  ah: reload pointers to skb data after calling skb_cow_data()
  ah: update maximum truncated ICV length
  xfrm: check trunc_len in XFRMA_ALG_AUTH_TRUNC
  ehea: Increase the skb array usage
  net/fec: remove config FEC2 as it's used nowhere
  pcnet_cs: add new_id
  tcp: disallow bind() to reuse addr/port
  net/r8169: Update the function of parsing firmware
  net: ppp: use {get,put}_unaligned_be{16,32}
  CAIF: Fix IPv6 support in receive path for GPRS/3G
  arp: allow to invalidate specific ARP entries
  net_sched: factorize qdisc stats handling
  mlx4: Call alloc_etherdev to allocate RX and TX queues
  net: Add alloc_netdev_mqs function
  caif: don't set connection request param size before copying data
  cxgb4vf: fix mailbox data/control coherency domain race
  qlcnic: change module parameter permissions
  ...
Diffstat (limited to 'net')
-rw-r--r--	net/caif/caif_socket.c	2
-rw-r--r--	net/caif/chnl_net.c	18
-rw-r--r--	net/core/dev.c	149
-rw-r--r--	net/core/filter.c	2
-rw-r--r--	net/core/rtnetlink.c	2
-rw-r--r--	net/dccp/dccp.h	3
-rw-r--r--	net/dccp/input.c	2
-rw-r--r--	net/dccp/sysctl.c	4
-rw-r--r--	net/ethernet/eth.c	12
-rw-r--r--	net/ipv4/ah4.c	7
-rw-r--r--	net/ipv4/arp.c	29
-rw-r--r--	net/ipv4/inet_connection_sock.c	5
-rw-r--r--	net/ipv4/inet_diag.c	2
-rw-r--r--	net/ipv4/netfilter/arp_tables.c	45
-rw-r--r--	net/ipv4/netfilter/ip_tables.c	45
-rw-r--r--	net/ipv6/ah6.c	8
-rw-r--r--	net/ipv6/inet6_connection_sock.c	2
-rw-r--r--	net/ipv6/netfilter/ip6_tables.c	45
-rw-r--r--	net/netfilter/nf_conntrack_netlink.c	18
-rw-r--r--	net/netfilter/x_tables.c	3
-rw-r--r--	net/netlink/genetlink.c	2
-rw-r--r--	net/phonet/af_phonet.c	6
-rw-r--r--	net/sched/act_csum.c	3
-rw-r--r--	net/sched/act_ipt.c	3
-rw-r--r--	net/sched/act_mirred.c	3
-rw-r--r--	net/sched/act_nat.c	3
-rw-r--r--	net/sched/act_pedit.c	3
-rw-r--r--	net/sched/act_police.c	3
-rw-r--r--	net/sched/act_simple.c	3
-rw-r--r--	net/sched/act_skbedit.c	3
-rw-r--r--	net/sched/sch_atm.c	6
-rw-r--r--	net/sched/sch_cbq.c	6
-rw-r--r--	net/sched/sch_drr.c	8
-rw-r--r--	net/sched/sch_dsmark.c	3
-rw-r--r--	net/sched/sch_hfsc.c	6
-rw-r--r--	net/sched/sch_htb.c	17
-rw-r--r--	net/sched/sch_ingress.c	3
-rw-r--r--	net/sched/sch_multiq.c	3
-rw-r--r--	net/sched/sch_netem.c	6
-rw-r--r--	net/sched/sch_prio.c	3
-rw-r--r--	net/sched/sch_red.c	3
-rw-r--r--	net/sched/sch_sfq.c	3
-rw-r--r--	net/sched/sch_tbf.c	3
-rw-r--r--	net/sched/sch_teql.c	3
-rw-r--r--	net/xfrm/xfrm_user.c	6
45 files changed, 235 insertions, 279 deletions
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 1bf0cf503796..8184c031d028 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -740,12 +740,12 @@ static int setsockopt(struct socket *sock,
 		if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
 			return -ENOPROTOOPT;
 		lock_sock(&(cf_sk->sk));
-		cf_sk->conn_req.param.size = ol;
 		if (ol > sizeof(cf_sk->conn_req.param.data) ||
 			copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
 			release_sock(&cf_sk->sk);
 			return -EINVAL;
 		}
+		cf_sk->conn_req.param.size = ol;
 		release_sock(&cf_sk->sk);
 		return 0;
 
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 84a422c98941..fa9dab372b68 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,6 +76,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
 	struct chnl_net *priv  = container_of(layr, struct chnl_net, chnl);
 	int pktlen;
 	int err = 0;
+	const u8 *ip_version;
+	u8 buf;
 
 	priv = container_of(layr, struct chnl_net, chnl);
 
@@ -90,7 +92,21 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
 	 * send the packet to the net stack.
 	 */
 	skb->dev = priv->netdev;
-	skb->protocol = htons(ETH_P_IP);
+
+	/* check the version of IP */
+	ip_version = skb_header_pointer(skb, 0, 1, &buf);
+	if (!ip_version)
+		return -EINVAL;
+	switch (*ip_version >> 4) {
+	case 4:
+		skb->protocol = htons(ETH_P_IP);
+		break;
+	case 6:
+		skb->protocol = htons(ETH_P_IPV6);
+		break;
+	default:
+		return -EINVAL;
+	}
 
 	/* If we change the header in loop mode, the checksum is corrupted. */
 	if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
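The CAIF receive path above now keys skb->protocol off the IP version nibble instead of assuming IPv4. A standalone sketch of the same classification (hypothetical helper, not kernel code; the EtherType constants are the standard ones):

#include <stdio.h>
#include <stdint.h>

#define ETH_P_IP	0x0800
#define ETH_P_IPV6	0x86DD

/* The version field is the top nibble of the first header byte. */
static int classify(const uint8_t *pkt, uint16_t *proto)
{
	switch (pkt[0] >> 4) {
	case 4: *proto = ETH_P_IP;   return 0;
	case 6: *proto = ETH_P_IPV6; return 0;
	default: return -1;	/* neither IPv4 nor IPv6: reject */
	}
}

int main(void)
{
	uint8_t v4_hdr[] = { 0x45 };	/* version 4, IHL 5 */
	uint8_t v6_hdr[] = { 0x60 };	/* version 6 */
	uint16_t proto;

	classify(v4_hdr, &proto);
	printf("0x%04x\n", proto);	/* 0x0800 */
	classify(v6_hdr, &proto);
	printf("0x%04x\n", proto);	/* 0x86dd */
	return 0;
}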
diff --git a/net/core/dev.c b/net/core/dev.c
index a215269d2e35..a3ef808b5e36 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1732,33 +1732,6 @@ void netif_device_attach(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_device_attach);
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
-{
-	return ((features & NETIF_F_NO_CSUM) ||
-		((features & NETIF_F_V4_CSUM) &&
-		 protocol == htons(ETH_P_IP)) ||
-		((features & NETIF_F_V6_CSUM) &&
-		 protocol == htons(ETH_P_IPV6)) ||
-		((features & NETIF_F_FCOE_CRC) &&
-		 protocol == htons(ETH_P_FCOE)));
-}
-
-static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
-{
-	__be16 protocol = skb->protocol;
-	int features = dev->features;
-
-	if (vlan_tx_tag_present(skb)) {
-		features &= dev->vlan_features;
-	} else if (protocol == htons(ETH_P_8021Q)) {
-		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-		protocol = veh->h_vlan_encapsulated_proto;
-		features &= dev->vlan_features;
-	}
-
-	return can_checksum_protocol(features, protocol);
-}
-
 /**
  * skb_dev_set -- assign a new device to a buffer
  * @skb: buffer for the new device
@@ -1971,16 +1944,14 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
 /**
  *	dev_gso_segment - Perform emulated hardware segmentation on skb.
  *	@skb: buffer to segment
+ *	@features: device features as applicable to this skb
  *
  *	This function segments the given skb and stores the list of segments
  *	in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb)
+static int dev_gso_segment(struct sk_buff *skb, int features)
 {
-	struct net_device *dev = skb->dev;
 	struct sk_buff *segs;
-	int features = dev->features & ~(illegal_highdma(dev, skb) ?
-					 NETIF_F_SG : 0);
 
 	segs = skb_gso_segment(skb, features);
 
@@ -2017,22 +1988,52 @@ static inline void skb_orphan_try(struct sk_buff *skb)
 	}
 }
 
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+	return ((features & NETIF_F_GEN_CSUM) ||
+		((features & NETIF_F_V4_CSUM) &&
+		 protocol == htons(ETH_P_IP)) ||
+		((features & NETIF_F_V6_CSUM) &&
+		 protocol == htons(ETH_P_IPV6)) ||
+		((features & NETIF_F_FCOE_CRC) &&
+		 protocol == htons(ETH_P_FCOE)));
+}
+
+static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+{
+	if (!can_checksum_protocol(protocol, features)) {
+		features &= ~NETIF_F_ALL_CSUM;
+		features &= ~NETIF_F_SG;
+	} else if (illegal_highdma(skb->dev, skb)) {
+		features &= ~NETIF_F_SG;
+	}
+
+	return features;
+}
+
+int netif_skb_features(struct sk_buff *skb)
 {
 	__be16 protocol = skb->protocol;
+	int features = skb->dev->features;
 
 	if (protocol == htons(ETH_P_8021Q)) {
 		struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
 		protocol = veh->h_vlan_encapsulated_proto;
-	} else if (!skb->vlan_tci)
-		return dev->features;
+	} else if (!vlan_tx_tag_present(skb)) {
+		return harmonize_features(skb, protocol, features);
+	}
 
-	if (protocol != htons(ETH_P_8021Q))
-		return dev->features & dev->vlan_features;
-	else
-		return 0;
+	features &= skb->dev->vlan_features;
+
+	if (protocol != htons(ETH_P_8021Q)) {
+		return harmonize_features(skb, protocol, features);
+	} else {
+		features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+				NETIF_F_GEN_CSUM;
+		return harmonize_features(skb, protocol, features);
+	}
 }
-EXPORT_SYMBOL(netif_get_vlan_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 /*
  * Returns true if either:
@@ -2042,22 +2043,13 @@ EXPORT_SYMBOL(netif_get_vlan_features);
  *	2. skb is fragmented and the device does not support SG, or if
  *	   at least one of fragments is in highmem and device does not
  *	   support DMA from it.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-				      struct net_device *dev)
+				      int features)
 {
-	if (skb_is_nonlinear(skb)) {
-		int features = dev->features;
-
-		if (vlan_tx_tag_present(skb))
-			features &= dev->vlan_features;
-
-		return (skb_has_frag_list(skb) &&
-			!(features & NETIF_F_FRAGLIST)) ||
+	return skb_is_nonlinear(skb) &&
+		((skb_has_frag_list(skb) &&
+			!(features & NETIF_F_FRAGLIST)) ||
 		(skb_shinfo(skb)->nr_frags &&
-		(!(features & NETIF_F_SG) ||
-		illegal_highdma(dev, skb)));
-	}
-
-	return 0;
+			!(features & NETIF_F_SG)));
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -2067,6 +2059,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 	int rc = NETDEV_TX_OK;
 
 	if (likely(!skb->next)) {
+		int features;
+
 		/*
 		 * If device doesnt need skb->dst, release it right now while
 		 * its hot in this cpu cache
@@ -2079,8 +2073,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 
 		skb_orphan_try(skb);
 
+		features = netif_skb_features(skb);
+
 		if (vlan_tx_tag_present(skb) &&
-		    !(dev->features & NETIF_F_HW_VLAN_TX)) {
+		    !(features & NETIF_F_HW_VLAN_TX)) {
 			skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
 			if (unlikely(!skb))
 				goto out;
@@ -2088,13 +2084,13 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			skb->vlan_tci = 0;
 		}
 
-		if (netif_needs_gso(dev, skb)) {
-			if (unlikely(dev_gso_segment(skb)))
+		if (netif_needs_gso(skb, features)) {
+			if (unlikely(dev_gso_segment(skb, features)))
 				goto out_kfree_skb;
 			if (skb->next)
 				goto gso;
 		} else {
-			if (skb_needs_linearize(skb, dev) &&
+			if (skb_needs_linearize(skb, features) &&
 			    __skb_linearize(skb))
 				goto out_kfree_skb;
 
@@ -2105,7 +2101,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 			if (skb->ip_summed == CHECKSUM_PARTIAL) {
 				skb_set_transport_header(skb,
 					skb_checksum_start_offset(skb));
-				if (!dev_can_checksum(dev, skb) &&
+				if (!(features & NETIF_F_ALL_CSUM) &&
 				    skb_checksum_help(skb))
 					goto out_kfree_skb;
 			}
@@ -2301,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 		 */
 		if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
 			skb_dst_force(skb);
-		__qdisc_update_bstats(q, skb->len);
+
+		qdisc_skb_cb(skb)->pkt_len = skb->len;
+		qdisc_bstats_update(q, skb);
+
 		if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
 			if (unlikely(contended)) {
 				spin_unlock(&q->busylock);
@@ -5621,18 +5620,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 }
 
 /**
- *	alloc_netdev_mq - allocate network device
+ *	alloc_netdev_mqs - allocate network device
  *	@sizeof_priv:	size of private data to allocate space for
  *	@name:		device name format string
  *	@setup:		callback to initialize device
- *	@queue_count:	the number of subqueues to allocate
+ *	@txqs:		the number of TX subqueues to allocate
+ *	@rxqs:		the number of RX subqueues to allocate
  *
  *	Allocates a struct net_device with private data area for driver use
  *	and performs basic initialization.  Also allocates subquue structs
- *	for each queue on the device at the end of the netdevice.
+ *	for each queue on the device.
  */
-struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
-		void (*setup)(struct net_device *), unsigned int queue_count)
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+		void (*setup)(struct net_device *),
+		unsigned int txqs, unsigned int rxqs)
 {
 	struct net_device *dev;
 	size_t alloc_size;
@@ -5640,12 +5641,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	BUG_ON(strlen(name) >= sizeof(dev->name));
 
-	if (queue_count < 1) {
+	if (txqs < 1) {
 		pr_err("alloc_netdev: Unable to allocate device "
 		       "with zero queues.\n");
 		return NULL;
 	}
 
+#ifdef CONFIG_RPS
+	if (rxqs < 1) {
+		pr_err("alloc_netdev: Unable to allocate device "
+		       "with zero RX queues.\n");
+		return NULL;
+	}
+#endif
+
 	alloc_size = sizeof(struct net_device);
 	if (sizeof_priv) {
 		/* ensure 32-byte alignment of private area */
@@ -5676,14 +5685,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 	dev_net_set(dev, &init_net);
 
-	dev->num_tx_queues = queue_count;
-	dev->real_num_tx_queues = queue_count;
+	dev->num_tx_queues = txqs;
+	dev->real_num_tx_queues = txqs;
 	if (netif_alloc_netdev_queues(dev))
 		goto free_pcpu;
 
 #ifdef CONFIG_RPS
-	dev->num_rx_queues = queue_count;
-	dev->real_num_rx_queues = queue_count;
+	dev->num_rx_queues = rxqs;
+	dev->real_num_rx_queues = rxqs;
 	if (netif_alloc_rx_queues(dev))
 		goto free_pcpu;
 #endif
@@ -5711,7 +5720,7 @@ free_p:
 	kfree(p);
 	return NULL;
 }
-EXPORT_SYMBOL(alloc_netdev_mq);
+EXPORT_SYMBOL(alloc_netdev_mqs);
 
 /**
  *	free_netdev - free network device
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b27d4efdd48..afc58374ca96 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -158,7 +158,7 @@ EXPORT_SYMBOL(sk_filter);
 /**
  *	sk_run_filter - run a filter on a socket
  *	@skb: buffer to run the filter on
- *	@filter: filter to apply
+ *	@fentry: filter to apply
  *
  * Decode and apply filter instructions to the skb->data.
  * Return length to keep, 0 for none. @skb is the data we are
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 750db57f3bb3..a5f7535aab5b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1820,7 +1820,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+	if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		struct sock *rtnl;
 		rtnl_dumpit_func dumpit;
 
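This `(nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP` pattern, repeated across several files in this merge, matters because NLM_F_DUMP is a composite of two flag bits; a minimal user-space sketch of the difference (constants as in <linux/netlink.h>):

#include <stdio.h>

/* From <linux/netlink.h>: NLM_F_DUMP is two bits, not one. */
#define NLM_F_ROOT	0x100
#define NLM_F_MATCH	0x200
#define NLM_F_DUMP	(NLM_F_ROOT | NLM_F_MATCH)

int main(void)
{
	unsigned short flags = NLM_F_ROOT;	/* only one of the two bits */

	/* Old test: true for either bit, so this request would wrongly
	 * be dispatched as a dump. */
	printf("old: %d\n", !!(flags & NLM_F_DUMP));		 /* 1 */

	/* Fixed test: true only when both bits are set. */
	printf("new: %d\n", (flags & NLM_F_DUMP) == NLM_F_DUMP); /* 0 */
	return 0;
}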
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 45087052d894..5fdb07229017 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -426,7 +426,8 @@ static inline void dccp_update_gsr(struct sock *sk, u64 seq)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 
-	dp->dccps_gsr = seq;
+	if (after48(seq, dp->dccps_gsr))
+		dp->dccps_gsr = seq;
 	/* Sequence validity window depends on remote Sequence Window (7.5.1) */
 	dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
 	/*
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 15af247ea007..8cde009e8b85 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -260,7 +260,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
 		 */
 		if (time_before(now, (dp->dccps_rate_last +
 				      sysctl_dccp_sync_ratelimit)))
-			return 0;
+			return -1;
 
 		DCCP_WARN("Step 6 failed for %s packet, "
 			  "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 563943822e58..42348824ee31 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -21,7 +21,8 @@
 /* Boundary values */
 static int		zero     = 0,
 			u8_max   = 0xFF;
-static unsigned long	seqw_min = 32;
+static unsigned long	seqw_min = DCCPF_SEQ_WMIN,
+			seqw_max = 0xFFFFFFFF;		/* maximum on 32 bit */
 
 static struct ctl_table dccp_default_table[] = {
 	{
@@ -31,6 +32,7 @@ static struct ctl_table dccp_default_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_doulongvec_minmax,
 		.extra1		= &seqw_min,		/* RFC 4340, 7.5.2 */
+		.extra2		= &seqw_max,
 	},
 	{
 		.procname	= "rx_ccid",
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f00ef2f1d814..f9d7ac924f15 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -347,10 +347,11 @@ void ether_setup(struct net_device *dev)
 EXPORT_SYMBOL(ether_setup);
 
 /**
- * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
  * @sizeof_priv: Size of additional driver-private structure to be allocated
  *	for this Ethernet device
- * @queue_count: The number of queues this device has.
+ * @txqs: The number of TX queues this device has.
+ * @txqs: The number of RX queues this device has.
  *
  * Fill in the fields of the device structure with Ethernet-generic
  * values. Basically does everything except registering the device.
@@ -360,11 +361,12 @@ EXPORT_SYMBOL(ether_setup);
  * this private data area.
  */
 
-struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+				      unsigned int rxqs)
 {
-	return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+	return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
 }
-EXPORT_SYMBOL(alloc_etherdev_mq);
+EXPORT_SYMBOL(alloc_etherdev_mqs);
 
 static size_t _format_mac_addr(char *buf, int buflen,
 			       const unsigned char *addr, int len)
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec6dce0..86961bec70ab 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
 	skb->ip_summed = CHECKSUM_NONE;
 
-	ah = (struct ip_auth_hdr *)skb->data;
-	iph = ip_hdr(skb);
-	ihl = ip_hdrlen(skb);
 
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
 
+	ah = (struct ip_auth_hdr *)skb->data;
+	iph = ip_hdr(skb);
+	ihl = ip_hdrlen(skb);
+
 	work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
 	if (!work_iph)
 		goto out;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b961dbc..04c8b69fd426 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1143,6 +1143,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
 	return err;
 }
 
+int arp_invalidate(struct net_device *dev, __be32 ip)
+{
+	struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+	int err = -ENXIO;
+
+	if (neigh) {
+		if (neigh->nud_state & ~NUD_NOARP)
+			err = neigh_update(neigh, NULL, NUD_FAILED,
+					   NEIGH_UPDATE_F_OVERRIDE|
+					   NEIGH_UPDATE_F_ADMIN);
+		neigh_release(neigh);
+	}
+
+	return err;
+}
+EXPORT_SYMBOL(arp_invalidate);
+
 static int arp_req_delete_public(struct net *net, struct arpreq *r,
 				 struct net_device *dev)
 {
@@ -1163,7 +1180,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 {
 	int err;
 	__be32 ip;
-	struct neighbour *neigh;
 
 	if (r->arp_flags & ATF_PUBL)
 		return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1197,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 		if (!dev)
 			return -EINVAL;
 	}
-	err = -ENXIO;
-	neigh = neigh_lookup(&arp_tbl, &ip, dev);
-	if (neigh) {
-		if (neigh->nud_state & ~NUD_NOARP)
-			err = neigh_update(neigh, NULL, NUD_FAILED,
-					   NEIGH_UPDATE_F_OVERRIDE|
-					   NEIGH_UPDATE_F_ADMIN);
-		neigh_release(neigh);
-	}
-	return err;
+	return arp_invalidate(dev, ip);
 }
 
 /*
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e318153f14..97e5fb765265 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
 			if (!reuse || !sk2->sk_reuse ||
-			    sk2->sk_state == TCP_LISTEN) {
+			    ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
 				const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
 				if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
 				    sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@ again:
 			    (tb->num_owners < smallest_size || smallest_size == -1)) {
 				smallest_size = tb->num_owners;
 				smallest_rover = rover;
-				if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+				if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+				    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
 					spin_unlock(&head->lock);
 					snum = smallest_rover;
 					goto have_snum;
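The `(1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE)` test in these bind-conflict changes folds two state comparisons into one mask lookup; a standalone sketch of the idiom (state values mirror include/net/tcp_states.h):

#include <stdio.h>

/* Mirrors the kernel's convention: TCPF_<STATE> == 1 << TCP_<STATE>. */
enum { TCP_ESTABLISHED = 1, TCP_CLOSE = 7, TCP_LISTEN = 10 };
#define TCPF_CLOSE	(1 << TCP_CLOSE)
#define TCPF_LISTEN	(1 << TCP_LISTEN)

static int state_blocks_reuse(int state)
{
	/* One mask test instead of two equality tests. */
	return !!((1 << state) & (TCPF_LISTEN | TCPF_CLOSE));
}

int main(void)
{
	printf("%d %d %d\n",
	       state_blocks_reuse(TCP_LISTEN),       /* 1 */
	       state_blocks_reuse(TCP_CLOSE),        /* 1 */
	       state_blocks_reuse(TCP_ESTABLISHED)); /* 0 */
	return 0;
}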
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada17129fce..2746c1fa6417 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -858,7 +858,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	    nlmsg_len(nlh) < hdrlen)
 		return -EINVAL;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (nlmsg_attrlen(nlh, hdrlen)) {
 			struct nlattr *attr;
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3fac340a28d5..e855fffaed95 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
 	struct arpt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	 * about).
 	 */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
 	struct arpt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
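The per-CPU loop above (repeated in ip_tables.c and ip6_tables.c below) is the standard seqlock read side: snapshot the 64-bit counters and retry if a writer raced with the read. A user-space analogue of the pattern (the kernel primitives are read_seqbegin()/read_seqretry(); this stand-in uses C11 atomics):

#include <stdatomic.h>
#include <stdint.h>

/* Minimal analogue of the kernel seqlock reader. A writer increments
 * 'seq' before and after updating the counters, so an odd or changed
 * value means the snapshot may be torn and must be retried. */
struct counters {
	_Atomic unsigned int seq;
	uint64_t bcnt, pcnt;
};

static void read_counters(struct counters *c, uint64_t *bcnt, uint64_t *pcnt)
{
	unsigned int start;

	do {
		start = atomic_load(&c->seq);		/* read_seqbegin() */
		*bcnt = c->bcnt;
		*pcnt = c->pcnt;
	} while ((start & 1) ||				/* writer in progress */
		 atomic_load(&c->seq) != start);	/* read_seqretry() */
}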
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a846d633b3b6..652efea013dc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
 	struct ipt_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU.
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i; /* macro does multi eval of i */
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	struct ipt_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4ef26ce..1aba54ae53c4 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -538,14 +538,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
 	if (!pskb_may_pull(skb, ah_hlen))
 		goto out;
 
-	ip6h = ipv6_hdr(skb);
-
-	skb_push(skb, hdr_len);
 
 	if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
 		goto out;
 	nfrags = err;
 
+	ah = (struct ip_auth_hdr *)skb->data;
+	ip6h = ipv6_hdr(skb);
+
+	skb_push(skb, hdr_len);
+
 	work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
 	if (!work_iph)
 		goto out;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d1815a..d144e629d2b4 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
 		     !sk2->sk_bound_dev_if ||
 		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
 		    (!sk->sk_reuse || !sk2->sk_reuse ||
-		     sk2->sk_state == TCP_LISTEN) &&
+		     ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
 		     ipv6_rcv_saddr_equal(sk, sk2))
 			break;
 	}
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 455582384ece..7d227c644f72 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
 	struct ip6t_entry *iter;
 	unsigned int cpu;
 	unsigned int i;
-	unsigned int curcpu = get_cpu();
-
-	/* Instead of clearing (by a previous call to memset())
-	 * the counters and using adds, we set the counters
-	 * with data used by 'current' CPU
-	 *
-	 * Bottom half has to be disabled to prevent deadlock
-	 * if new softirq were to run and call ipt_do_table
-	 */
-	local_bh_disable();
-	i = 0;
-	xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-		SET_COUNTER(counters[i], iter->counters.bcnt,
-			    iter->counters.pcnt);
-		++i;
-	}
-	local_bh_enable();
-	/* Processing counters from other cpus, we can let bottom half enabled,
-	 * (preemption is disabled)
-	 */
 
 	for_each_possible_cpu(cpu) {
-		if (cpu == curcpu)
-			continue;
+		seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
 		i = 0;
-		local_bh_disable();
-		xt_info_wrlock(cpu);
 		xt_entry_foreach(iter, t->entries[cpu], t->size) {
-			ADD_COUNTER(counters[i], iter->counters.bcnt,
-				    iter->counters.pcnt);
+			u64 bcnt, pcnt;
+			unsigned int start;
+
+			do {
+				start = read_seqbegin(lock);
+				bcnt = iter->counters.bcnt;
+				pcnt = iter->counters.pcnt;
+			} while (read_seqretry(lock, start));
+
+			ADD_COUNTER(counters[i], bcnt, pcnt);
 			++i;
 		}
-		xt_info_wrunlock(cpu);
-		local_bh_enable();
 	}
-	put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
 	   (other than comefrom, which userspace doesn't care
 	   about). */
 	countersize = sizeof(struct xt_counters) * private->number;
-	counters = vmalloc(countersize);
+	counters = vzalloc(countersize);
 
 	if (counters == NULL)
 		return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
 	struct ip6t_entry *iter;
 
 	ret = 0;
-	counters = vmalloc(num_counters * sizeof(struct xt_counters));
+	counters = vzalloc(num_counters * sizeof(struct xt_counters));
 	if (!counters) {
 		ret = -ENOMEM;
 		goto out;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0cdba50c0d69..5cb8d3027b18 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
 	struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
 	u_int8_t l3proto = nfmsg->nfgen_family;
 
-	rcu_read_lock();
+	spin_lock_bh(&nf_conntrack_lock);
 	last = (struct nf_conn *)cb->args[1];
 	for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-		hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+		hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
 					   hnnode) {
 			if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
 				continue;
 			ct = nf_ct_tuplehash_to_ctrack(h);
-			if (!atomic_inc_not_zero(&ct->ct_general.use))
-				continue;
 			/* Dump entries of a given L3 protocol number.
 			 * If it is not specified, ie. l3proto == 0,
 			 * then dump everything. */
 			if (l3proto && nf_ct_l3num(ct) != l3proto)
-				goto releasect;
+				continue;
 			if (cb->args[1]) {
 				if (ct != last)
-					goto releasect;
+					continue;
 				cb->args[1] = 0;
 			}
 			if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@ restart:
 				if (acct)
 					memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
 			}
-releasect:
-		nf_ct_put(ct);
 		}
 		if (cb->args[1]) {
 			cb->args[1] = 0;
@@ -690,7 +686,7 @@ releasect:
 		}
 	}
 out:
-	rcu_read_unlock();
+	spin_unlock_bh(&nf_conntrack_lock);
 	if (last)
 		nf_ct_put(last);
 
@@ -928,7 +924,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
 	u16 zone;
 	int err;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP)
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
 		return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
 					  ctnetlink_done);
 
@@ -1790,7 +1786,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
 	u16 zone;
 	int err;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		return netlink_dump_start(ctnl, skb, nlh,
 					  ctnetlink_exp_dump_table,
 					  ctnetlink_exp_done);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 80463507420e..c94237631077 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1325,7 +1325,8 @@ static int __init xt_init(void)
 
 	for_each_possible_cpu(i) {
 		struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-		spin_lock_init(&lock->lock);
+
+		seqlock_init(&lock->lock);
 		lock->readers = 0;
 	}
 
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1781d99145e2..f83cb370292b 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -519,7 +519,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 	    security_netlink_recv(skb, CAP_NET_ADMIN))
 		return -EPERM;
 
-	if (nlh->nlmsg_flags & NLM_F_DUMP) {
+	if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (ops->dumpit == NULL)
 			return -EOPNOTSUPP;
 
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index fd95beb72f5d..1072b2c19d31 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -37,7 +37,7 @@
 /* Transport protocol registration */
 static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
 
-static struct phonet_protocol *phonet_proto_get(int protocol)
+static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
 {
 	struct phonet_protocol *pp;
 
@@ -458,7 +458,7 @@ static struct packet_type phonet_packet_type __read_mostly = {
 
 static DEFINE_MUTEX(proto_tab_lock);
 
-int __init_or_module phonet_proto_register(int protocol,
+int __init_or_module phonet_proto_register(unsigned int protocol,
 		struct phonet_protocol *pp)
 {
 	int err = 0;
@@ -481,7 +481,7 @@ int __init_or_module phonet_proto_register(int protocol,
 }
 EXPORT_SYMBOL(phonet_proto_register);
 
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
 {
 	mutex_lock(&proto_tab_lock);
 	BUG_ON(proto_tab[protocol] != pp);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 67dc7ce9b63a..83ddfc07e45d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb,
 
 	spin_lock(&p->tcf_lock);
 	p->tcf_tm.lastuse = jiffies;
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	action = p->tcf_action;
 	update_flags = p->update_flags;
 	spin_unlock(&p->tcf_lock);
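This and the remaining sched diffs all substitute the helpers added by the "net_sched: factorize qdisc stats handling" commit in this merge; approximately (sketched from include/net/sch_generic.h of this era, not copied verbatim), they expand to:

/* Sketch of the helpers these diffs switch to. bstats_update() also
 * counts GSO segments, which several call sites previously got wrong
 * by doing a bare packets++. */
static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
				 const struct sk_buff *skb)
{
	bstats->bytes += qdisc_pkt_len(skb);
	bstats->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
}

static inline void qdisc_bstats_update(struct Qdisc *sch,
				       const struct sk_buff *skb)
{
	bstats_update(&sch->bstats, skb);
}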
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8daef9632255..c2a7c20e81c1 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
 	spin_lock(&ipt->tcf_lock);
 
 	ipt->tcf_tm.lastuse = jiffies;
-	ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	ipt->tcf_bstats.packets++;
+	bstats_update(&ipt->tcf_bstats, skb);
 
 	/* yes, we have to worry about both in and out dev
 	 worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0c311be92827..d765067e99db 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
 
 	spin_lock(&m->tcf_lock);
 	m->tcf_tm.lastuse = jiffies;
-	m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	m->tcf_bstats.packets++;
+	bstats_update(&m->tcf_bstats, skb);
 
 	dev = m->tcfm_dev;
 	if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 186eb837e600..178a4bd7b7cb 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
 	egress = p->flags & TCA_NAT_FLAG_EGRESS;
 	action = p->tcf_action;
 
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 
 	spin_unlock(&p->tcf_lock);
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0593c9640db..445bef716f77 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 bad:
 	p->tcf_qstats.overlimits++;
 done:
-	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	p->tcf_bstats.packets++;
+	bstats_update(&p->tcf_bstats, skb);
 	spin_unlock(&p->tcf_lock);
 	return p->tcf_action;
 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7ebf7439b478..e2f08b1e2e58 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 
 	spin_lock(&police->tcf_lock);
 
-	police->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	police->tcf_bstats.packets++;
+	bstats_update(&police->tcf_bstats, skb);
 
 	if (police->tcfp_ewma_rate &&
 	    police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97e84f3ee775..7287cff7af3e 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	/* print policy string followed by _ then packet count
 	 * Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 66cbf4eb8855..836f5fee9e58 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
 
 	spin_lock(&d->tcf_lock);
 	d->tcf_tm.lastuse = jiffies;
-	d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-	d->tcf_bstats.packets++;
+	bstats_update(&d->tcf_bstats, skb);
 
 	if (d->flags & SKBEDIT_F_PRIORITY)
 		skb->priority = d->priority;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 282540778aa8..943d733409d0 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@ drop: __maybe_unused
 		}
 		return ret;
 	}
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	flow->bstats.bytes += qdisc_pkt_len(skb);
-	flow->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
+	bstats_update(&flow->bstats, skb);
 	/*
 	 * Okay, this may seem weird. We pretend we've dropped the packet if
 	 * it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb7631590865..c80d1c210c5d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	ret = qdisc_enqueue(skb, cl->q);
 	if (ret == NET_XMIT_SUCCESS) {
 		sch->q.qlen++;
-		sch->bstats.packets++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
+		qdisc_bstats_update(sch, skb);
 		cbq_mark_toplevel(q, cl);
 		if (!cl->next_alive)
 			cbq_activate_class(cl);
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
 		ret = qdisc_enqueue(skb, cl->q);
 		if (ret == NET_XMIT_SUCCESS) {
 			sch->q.qlen++;
-			sch->bstats.packets++;
-			sch->bstats.bytes += qdisc_pkt_len(skb);
+			qdisc_bstats_update(sch, skb);
 			if (!cl->next_alive)
 				cbq_activate_class(cl);
 			return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b5313f8cf..de55e642eafc 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
 	struct drr_sched *q = qdisc_priv(sch);
 	struct drr_class *cl;
-	unsigned int len;
 	int err;
 
 	cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 	}
 
-	len = qdisc_pkt_len(skb);
 	err = qdisc_enqueue(skb, cl->qdisc);
 	if (unlikely(err != NET_XMIT_SUCCESS)) {
 		if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		cl->deficit = cl->quantum;
 	}
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += len;
-	sch->bstats.packets++;
-	sch->bstats.bytes += len;
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 
 	sch->q.qlen++;
 	return err;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d62bb5c..60f4bdd4408e 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		return err;
 	}
 
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b7bb36..2e45791d4f6c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 	if (cl->qdisc->q.qlen == 1)
 		set_active(cl, qdisc_pkt_len(skb));
 
-	cl->bstats.packets++;
-	cl->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	bstats_update(&cl->bstats, skb);
+	qdisc_bstats_update(sch, skb);
 	sch->q.qlen++;
 
 	return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d6c52d..984c1b0c6836 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		}
 		return ret;
 	} else {
-		cl->bstats.packets +=
-			skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-		cl->bstats.bytes += qdisc_pkt_len(skb);
+		bstats_update(&cl->bstats, skb);
 		htb_activate(q, cl);
 	}
 
 	sch->q.qlen++;
-	sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
583 580
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
 			htb_add_to_wait_tree(q, cl, diff);
 		}
 
-		/* update byte stats except for leaves which are already updated */
-		if (cl->level) {
-			cl->bstats.bytes += bytes;
-			cl->bstats.packets += skb_is_gso(skb)?
-					skb_shinfo(skb)->gso_segs:1;
-		}
+		/* update basic stats except for leaves which are already updated */
+		if (cl->level)
+			bstats_update(&cl->bstats, skb);
+
 		cl = cl->parent;
 	}
 }
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a68445..bce1665239b8 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	result = tc_classify(skb, p->filter_list, &res);
 
-	sch->bstats.packets++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
+	qdisc_bstats_update(sch, skb);
 	switch (result) {
 	case TC_ACT_SHOT:
 		result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690deab5d0..21f13da24763 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c083a78..1c4bce863479 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	if (likely(ret == NET_XMIT_SUCCESS)) {
 		sch->q.qlen++;
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 	} else if (net_xmit_drop_count(ret)) {
 		sch->qstats.drops++;
 	}
@@ -477,8 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
 		__skb_queue_after(list, skb, nskb);
 
 		sch->qstats.backlog += qdisc_pkt_len(nskb);
-		sch->bstats.bytes += qdisc_pkt_len(nskb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, nskb);
 
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bce33ce..966158d49dd1 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 
 	ret = qdisc_enqueue(skb, qdisc);
 	if (ret == NET_XMIT_SUCCESS) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 		return NET_XMIT_SUCCESS;
 	}
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a67ba3c5a0cc..a6009c5a2c97 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	ret = qdisc_enqueue(skb, child);
 	if (likely(ret == NET_XMIT_SUCCESS)) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		sch->q.qlen++;
 	} else if (net_xmit_drop_count(ret)) {
 		q->stats.pdrop++;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d54ac94066c2..239ec53a634d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -403,8 +403,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 		slot->allot = q->scaled_quantum;
 	}
 	if (++sch->q.qlen <= q->limit) {
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d64635..77565e721811 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 	}
 
 	sch->q.qlen++;
-	sch->bstats.bytes += qdisc_pkt_len(skb);
-	sch->bstats.packets++;
+	qdisc_bstats_update(sch, skb);
 	return NET_XMIT_SUCCESS;
 }
 
141 140
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 106479a7c94a..af9360d1f6eb 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -83,8 +83,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
 
 	if (q->q.qlen < dev->tx_queue_len) {
 		__skb_queue_tail(&q->q, skb);
-		sch->bstats.bytes += qdisc_pkt_len(skb);
-		sch->bstats.packets++;
+		qdisc_bstats_update(sch, skb);
 		return NET_XMIT_SUCCESS;
 	}
 
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8eb889510916..d5e1e0b08890 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -26,6 +26,7 @@
 #include <net/sock.h>
 #include <net/xfrm.h>
 #include <net/netlink.h>
+#include <net/ah.h>
 #include <asm/uaccess.h>
 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
 #include <linux/in6.h>
@@ -302,7 +303,8 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
 	algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
 	if (!algo)
 		return -ENOSYS;
-	if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
+	if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
+	    ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
 		return -EINVAL;
 	*props = algo->desc.sadb_alg_id;
 
@@ -2187,7 +2189,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 
 	if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
 	     type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
-	    (nlh->nlmsg_flags & NLM_F_DUMP)) {
+	    (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
 		if (link->dump == NULL)
 			return -EINVAL;
 
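In the trunc_len check above, alg_trunc_len is in bits while MAX_AH_AUTH_LEN (from the newly included <net/ah.h>, raised by the "ah: update maximum truncated ICV length" commit in this merge) is in bytes, hence the division by 8; a sketch of the combined bound (the constant's value is an assumption here):

#define MAX_AH_AUTH_LEN	64	/* assumed value; see include/net/ah.h */

/* Hypothetical standalone form of the new validation. */
static int auth_trunc_len_ok(unsigned int trunc_bits,
			     unsigned int icv_fullbits)
{
	return (trunc_bits / 8) <= MAX_AH_AUTH_LEN &&
	       trunc_bits <= icv_fullbits;
}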