Diffstat (limited to 'net')
-rw-r--r--  net/9p/protocol.c                      |  22
-rw-r--r--  net/caif/caif_socket.c                 |   2
-rw-r--r--  net/caif/chnl_net.c                    |  18
-rw-r--r--  net/core/dev.c                         | 149
-rw-r--r--  net/core/filter.c                      |   2
-rw-r--r--  net/core/rtnetlink.c                   |   2
-rw-r--r--  net/dccp/dccp.h                        |   3
-rw-r--r--  net/dccp/input.c                       |   2
-rw-r--r--  net/dccp/sysctl.c                      |   4
-rw-r--r--  net/ethernet/eth.c                     |  12
-rw-r--r--  net/ipv4/ah4.c                         |   7
-rw-r--r--  net/ipv4/arp.c                         |  29
-rw-r--r--  net/ipv4/inet_connection_sock.c        |   5
-rw-r--r--  net/ipv4/inet_diag.c                   |   2
-rw-r--r--  net/ipv4/netfilter/arp_tables.c        |  45
-rw-r--r--  net/ipv4/netfilter/ip_tables.c         |  45
-rw-r--r--  net/ipv6/ah6.c                         |   8
-rw-r--r--  net/ipv6/inet6_connection_sock.c       |   2
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c        |  45
-rw-r--r--  net/netfilter/nf_conntrack_netlink.c   |  18
-rw-r--r--  net/netfilter/x_tables.c               |   3
-rw-r--r--  net/netlink/genetlink.c                |   2
-rw-r--r--  net/phonet/af_phonet.c                 |   6
-rw-r--r--  net/sched/act_csum.c                   |   3
-rw-r--r--  net/sched/act_ipt.c                    |   3
-rw-r--r--  net/sched/act_mirred.c                 |   3
-rw-r--r--  net/sched/act_nat.c                    |   3
-rw-r--r--  net/sched/act_pedit.c                  |   3
-rw-r--r--  net/sched/act_police.c                 |   3
-rw-r--r--  net/sched/act_simple.c                 |   3
-rw-r--r--  net/sched/act_skbedit.c                |   3
-rw-r--r--  net/sched/sch_atm.c                    |   6
-rw-r--r--  net/sched/sch_cbq.c                    |   6
-rw-r--r--  net/sched/sch_drr.c                    |   8
-rw-r--r--  net/sched/sch_dsmark.c                 |   3
-rw-r--r--  net/sched/sch_hfsc.c                   |   6
-rw-r--r--  net/sched/sch_htb.c                    |  17
-rw-r--r--  net/sched/sch_ingress.c                |   3
-rw-r--r--  net/sched/sch_multiq.c                 |   3
-rw-r--r--  net/sched/sch_netem.c                  |   6
-rw-r--r--  net/sched/sch_prio.c                   |   3
-rw-r--r--  net/sched/sch_red.c                    |   3
-rw-r--r--  net/sched/sch_sfq.c                    |   3
-rw-r--r--  net/sched/sch_tbf.c                    |   3
-rw-r--r--  net/sched/sch_teql.c                   |   3
-rw-r--r--  net/sunrpc/auth.c                      |  28
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c         |  44
-rw-r--r--  net/sunrpc/bc_svc.c                    |   2
-rw-r--r--  net/sunrpc/clnt.c                      |  21
-rw-r--r--  net/sunrpc/rpc_pipe.c                  |   2
-rw-r--r--  net/sunrpc/rpcb_clnt.c                 | 147
-rw-r--r--  net/sunrpc/svc.c                       |  36
-rw-r--r--  net/sunrpc/svcsock.c                   | 106
-rw-r--r--  net/sunrpc/xdr.c                       | 155
-rw-r--r--  net/xfrm/xfrm_user.c                   |   6

55 files changed, 590 insertions(+), 487 deletions(-)
diff --git a/net/9p/protocol.c b/net/9p/protocol.c
index 798beac7f100..1e308f210928 100644
--- a/net/9p/protocol.c
+++ b/net/9p/protocol.c
@@ -178,27 +178,24 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
             break;
         case 's':{
                 char **sptr = va_arg(ap, char **);
-                int16_t len;
-                int size;
+                uint16_t len;
 
                 errcode = p9pdu_readf(pdu, proto_version,
                                 "w", &len);
                 if (errcode)
                     break;
 
-                size = max_t(int16_t, len, 0);
-
-                *sptr = kmalloc(size + 1, GFP_KERNEL);
+                *sptr = kmalloc(len + 1, GFP_KERNEL);
                 if (*sptr == NULL) {
                     errcode = -EFAULT;
                     break;
                 }
-                if (pdu_read(pdu, *sptr, size)) {
+                if (pdu_read(pdu, *sptr, len)) {
                     errcode = -EFAULT;
                     kfree(*sptr);
                     *sptr = NULL;
                 } else
-                    (*sptr)[size] = 0;
+                    (*sptr)[len] = 0;
             }
             break;
         case 'Q':{
@@ -234,14 +231,14 @@ p9pdu_vreadf(struct p9_fcall *pdu, int proto_version, const char *fmt,
             }
             break;
         case 'D':{
-                int32_t *count = va_arg(ap, int32_t *);
+                uint32_t *count = va_arg(ap, uint32_t *);
                 void **data = va_arg(ap, void **);
 
                 errcode =
                     p9pdu_readf(pdu, proto_version, "d", count);
                 if (!errcode) {
                     *count =
-                        min_t(int32_t, *count,
+                        min_t(uint32_t, *count,
                               pdu->size - pdu->offset);
                     *data = &pdu->sdata[pdu->offset];
                 }
@@ -404,9 +401,10 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
             break;
         case 's':{
                 const char *sptr = va_arg(ap, const char *);
-                int16_t len = 0;
+                uint16_t len = 0;
                 if (sptr)
-                    len = min_t(int16_t, strlen(sptr), USHRT_MAX);
+                    len = min_t(uint16_t, strlen(sptr),
+                                USHRT_MAX);
 
                 errcode = p9pdu_writef(pdu, proto_version,
                             "w", len);
@@ -438,7 +436,7 @@ p9pdu_vwritef(struct p9_fcall *pdu, int proto_version, const char *fmt,
                             stbuf->n_gid, stbuf->n_muid);
             } break;
         case 'D':{
-                int32_t count = va_arg(ap, int32_t);
+                uint32_t count = va_arg(ap, uint32_t);
                 const void *data = va_arg(ap, const void *);
 
                 errcode = p9pdu_writef(pdu, proto_version, "d",
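The hunks above switch the 9P string and data lengths from int16_t/int32_t to their unsigned counterparts. A minimal, self-contained C sketch of the signedness hazard being removed (illustrative values, not taken from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint16_t wire = 0xffff;             /* 16-bit length as transmitted */
        int16_t as_signed = (int16_t)wire;  /* old representation: -1 */

        /* The old code had to clamp negative values to 0, silently
         * truncating any string longer than 32767 bytes; keeping the
         * length unsigned honours the full 16-bit range. */
        printf("signed: %d, unsigned: %u\n", as_signed, wire);
        return 0;
    }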
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 1bf0cf503796..8184c031d028 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -740,12 +740,12 @@ static int setsockopt(struct socket *sock,
         if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
             return -ENOPROTOOPT;
         lock_sock(&(cf_sk->sk));
-        cf_sk->conn_req.param.size = ol;
         if (ol > sizeof(cf_sk->conn_req.param.data) ||
             copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
             release_sock(&cf_sk->sk);
             return -EINVAL;
         }
+        cf_sk->conn_req.param.size = ol;
         release_sock(&cf_sk->sk);
         return 0;
 
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 84a422c98941..fa9dab372b68 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -76,6 +76,8 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
     struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
     int pktlen;
     int err = 0;
+    const u8 *ip_version;
+    u8 buf;
 
     priv = container_of(layr, struct chnl_net, chnl);
 
@@ -90,7 +92,21 @@ static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
      * send the packet to the net stack.
      */
     skb->dev = priv->netdev;
-    skb->protocol = htons(ETH_P_IP);
+
+    /* check the version of IP */
+    ip_version = skb_header_pointer(skb, 0, 1, &buf);
+    if (!ip_version)
+        return -EINVAL;
+    switch (*ip_version >> 4) {
+    case 4:
+        skb->protocol = htons(ETH_P_IP);
+        break;
+    case 6:
+        skb->protocol = htons(ETH_P_IPV6);
+        break;
+    default:
+        return -EINVAL;
+    }
 
     /* If we change the header in loop mode, the checksum is corrupted. */
     if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
diff --git a/net/core/dev.c b/net/core/dev.c
index a215269d2e35..a3ef808b5e36 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1732,33 +1732,6 @@ void netif_device_attach(struct net_device *dev)
 }
 EXPORT_SYMBOL(netif_device_attach);
 
-static bool can_checksum_protocol(unsigned long features, __be16 protocol)
-{
-    return ((features & NETIF_F_NO_CSUM) ||
-        ((features & NETIF_F_V4_CSUM) &&
-         protocol == htons(ETH_P_IP)) ||
-        ((features & NETIF_F_V6_CSUM) &&
-         protocol == htons(ETH_P_IPV6)) ||
-        ((features & NETIF_F_FCOE_CRC) &&
-         protocol == htons(ETH_P_FCOE)));
-}
-
-static bool dev_can_checksum(struct net_device *dev, struct sk_buff *skb)
-{
-    __be16 protocol = skb->protocol;
-    int features = dev->features;
-
-    if (vlan_tx_tag_present(skb)) {
-        features &= dev->vlan_features;
-    } else if (protocol == htons(ETH_P_8021Q)) {
-        struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
-        protocol = veh->h_vlan_encapsulated_proto;
-        features &= dev->vlan_features;
-    }
-
-    return can_checksum_protocol(features, protocol);
-}
-
 /**
  * skb_dev_set -- assign a new device to a buffer
  * @skb: buffer for the new device
@@ -1971,16 +1944,14 @@ static void dev_gso_skb_destructor(struct sk_buff *skb)
 /**
  * dev_gso_segment - Perform emulated hardware segmentation on skb.
  * @skb: buffer to segment
+ * @features: device features as applicable to this skb
  *
  * This function segments the given skb and stores the list of segments
  * in skb->next.
  */
-static int dev_gso_segment(struct sk_buff *skb)
+static int dev_gso_segment(struct sk_buff *skb, int features)
 {
-    struct net_device *dev = skb->dev;
     struct sk_buff *segs;
-    int features = dev->features & ~(illegal_highdma(dev, skb) ?
-                     NETIF_F_SG : 0);
 
     segs = skb_gso_segment(skb, features);
 
@@ -2017,22 +1988,52 @@ static inline void skb_orphan_try(struct sk_buff *skb)
     }
 }
 
-int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+static bool can_checksum_protocol(unsigned long features, __be16 protocol)
+{
+    return ((features & NETIF_F_GEN_CSUM) ||
+        ((features & NETIF_F_V4_CSUM) &&
+         protocol == htons(ETH_P_IP)) ||
+        ((features & NETIF_F_V6_CSUM) &&
+         protocol == htons(ETH_P_IPV6)) ||
+        ((features & NETIF_F_FCOE_CRC) &&
+         protocol == htons(ETH_P_FCOE)));
+}
+
+static int harmonize_features(struct sk_buff *skb, __be16 protocol, int features)
+{
+    if (!can_checksum_protocol(protocol, features)) {
+        features &= ~NETIF_F_ALL_CSUM;
+        features &= ~NETIF_F_SG;
+    } else if (illegal_highdma(skb->dev, skb)) {
+        features &= ~NETIF_F_SG;
+    }
+
+    return features;
+}
+
+int netif_skb_features(struct sk_buff *skb)
 {
     __be16 protocol = skb->protocol;
+    int features = skb->dev->features;
 
     if (protocol == htons(ETH_P_8021Q)) {
         struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
         protocol = veh->h_vlan_encapsulated_proto;
-    } else if (!skb->vlan_tci)
-        return dev->features;
+    } else if (!vlan_tx_tag_present(skb)) {
+        return harmonize_features(skb, protocol, features);
+    }
 
-    if (protocol != htons(ETH_P_8021Q))
-        return dev->features & dev->vlan_features;
-    else
-        return 0;
+    features &= skb->dev->vlan_features;
+
+    if (protocol != htons(ETH_P_8021Q)) {
+        return harmonize_features(skb, protocol, features);
+    } else {
+        features &= NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
+                NETIF_F_GEN_CSUM;
+        return harmonize_features(skb, protocol, features);
+    }
 }
-EXPORT_SYMBOL(netif_get_vlan_features);
+EXPORT_SYMBOL(netif_skb_features);
 
 /*
  * Returns true if either:
@@ -2042,22 +2043,13 @@ EXPORT_SYMBOL(netif_get_vlan_features);
  *    support DMA from it.
  */
 static inline int skb_needs_linearize(struct sk_buff *skb,
-                      struct net_device *dev)
+                      int features)
 {
-    if (skb_is_nonlinear(skb)) {
-        int features = dev->features;
-
-        if (vlan_tx_tag_present(skb))
-            features &= dev->vlan_features;
-
-        return (skb_has_frag_list(skb) &&
-            !(features & NETIF_F_FRAGLIST)) ||
-            (skb_shinfo(skb)->nr_frags &&
-            (!(features & NETIF_F_SG) ||
-            illegal_highdma(dev, skb)));
-    }
-
-    return 0;
+    return skb_is_nonlinear(skb) &&
+            ((skb_has_frag_list(skb) &&
+                !(features & NETIF_F_FRAGLIST)) ||
+            (skb_shinfo(skb)->nr_frags &&
+                !(features & NETIF_F_SG)));
 }
 
 int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
@@ -2067,6 +2059,8 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
     int rc = NETDEV_TX_OK;
 
     if (likely(!skb->next)) {
+        int features;
+
         /*
          * If device doesnt need skb->dst, release it right now while
          * its hot in this cpu cache
@@ -2079,8 +2073,10 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
 
         skb_orphan_try(skb);
 
+        features = netif_skb_features(skb);
+
         if (vlan_tx_tag_present(skb) &&
-            !(dev->features & NETIF_F_HW_VLAN_TX)) {
+            !(features & NETIF_F_HW_VLAN_TX)) {
             skb = __vlan_put_tag(skb, vlan_tx_tag_get(skb));
             if (unlikely(!skb))
                 goto out;
@@ -2088,13 +2084,13 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
             skb->vlan_tci = 0;
         }
 
-        if (netif_needs_gso(dev, skb)) {
-            if (unlikely(dev_gso_segment(skb)))
+        if (netif_needs_gso(skb, features)) {
+            if (unlikely(dev_gso_segment(skb, features)))
                 goto out_kfree_skb;
             if (skb->next)
                 goto gso;
         } else {
-            if (skb_needs_linearize(skb, dev) &&
+            if (skb_needs_linearize(skb, features) &&
                 __skb_linearize(skb))
                 goto out_kfree_skb;
 
@@ -2105,7 +2101,7 @@ int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
             if (skb->ip_summed == CHECKSUM_PARTIAL) {
                 skb_set_transport_header(skb,
                     skb_checksum_start_offset(skb));
-                if (!dev_can_checksum(dev, skb) &&
+                if (!(features & NETIF_F_ALL_CSUM) &&
                     skb_checksum_help(skb))
                     goto out_kfree_skb;
             }
@@ -2301,7 +2297,10 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
          */
         if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
             skb_dst_force(skb);
-        __qdisc_update_bstats(q, skb->len);
+
+        qdisc_skb_cb(skb)->pkt_len = skb->len;
+        qdisc_bstats_update(q, skb);
+
         if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
             if (unlikely(contended)) {
                 spin_unlock(&q->busylock);
@@ -5621,18 +5620,20 @@ struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
 }
 
 /**
- * alloc_netdev_mq - allocate network device
+ * alloc_netdev_mqs - allocate network device
  * @sizeof_priv: size of private data to allocate space for
  * @name: device name format string
  * @setup: callback to initialize device
- * @queue_count: the number of subqueues to allocate
+ * @txqs: the number of TX subqueues to allocate
+ * @rxqs: the number of RX subqueues to allocate
  *
  * Allocates a struct net_device with private data area for driver use
  * and performs basic initialization. Also allocates subquue structs
- * for each queue on the device at the end of the netdevice.
+ * for each queue on the device.
  */
-struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
-    void (*setup)(struct net_device *), unsigned int queue_count)
+struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+        void (*setup)(struct net_device *),
+        unsigned int txqs, unsigned int rxqs)
 {
     struct net_device *dev;
     size_t alloc_size;
@@ -5640,12 +5641,20 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
     BUG_ON(strlen(name) >= sizeof(dev->name));
 
-    if (queue_count < 1) {
+    if (txqs < 1) {
         pr_err("alloc_netdev: Unable to allocate device "
                "with zero queues.\n");
         return NULL;
     }
 
+#ifdef CONFIG_RPS
+    if (rxqs < 1) {
+        pr_err("alloc_netdev: Unable to allocate device "
+               "with zero RX queues.\n");
+        return NULL;
+    }
+#endif
+
     alloc_size = sizeof(struct net_device);
     if (sizeof_priv) {
         /* ensure 32-byte alignment of private area */
@@ -5676,14 +5685,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
     dev_net_set(dev, &init_net);
 
-    dev->num_tx_queues = queue_count;
-    dev->real_num_tx_queues = queue_count;
+    dev->num_tx_queues = txqs;
+    dev->real_num_tx_queues = txqs;
     if (netif_alloc_netdev_queues(dev))
         goto free_pcpu;
 
 #ifdef CONFIG_RPS
-    dev->num_rx_queues = queue_count;
-    dev->real_num_rx_queues = queue_count;
+    dev->num_rx_queues = rxqs;
+    dev->real_num_rx_queues = rxqs;
     if (netif_alloc_rx_queues(dev))
         goto free_pcpu;
 #endif
@@ -5711,7 +5720,7 @@ free_p:
     kfree(p);
     return NULL;
 }
-EXPORT_SYMBOL(alloc_netdev_mq);
+EXPORT_SYMBOL(alloc_netdev_mqs);
 
 /**
  * free_netdev - free network device
diff --git a/net/core/filter.c b/net/core/filter.c
index 2b27d4efdd48..afc58374ca96 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -158,7 +158,7 @@ EXPORT_SYMBOL(sk_filter);
 /**
  *  sk_run_filter - run a filter on a socket
  *  @skb: buffer to run the filter on
- *  @filter: filter to apply
+ *  @fentry: filter to apply
  *
  *  Decode and apply filter instructions to the skb->data.
  *  Return length to keep, 0 for none. @skb is the data we are
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 750db57f3bb3..a5f7535aab5b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1820,7 +1820,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
     if (kind != 2 && security_netlink_recv(skb, CAP_NET_ADMIN))
         return -EPERM;
 
-    if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
+    if (kind == 2 && (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         struct sock *rtnl;
         rtnl_dumpit_func dumpit;
 
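NLM_F_DUMP is a composite mask, (NLM_F_ROOT | NLM_F_MATCH), so the old `flags & NLM_F_DUMP` test also fired when only one of the two bits was set; the same strict comparison recurs in the inet_diag, nf_conntrack_netlink and genetlink hunks below. A small user-space sketch of the difference (illustrative only):

    #include <linux/netlink.h>  /* NLM_F_ROOT, NLM_F_MATCH, NLM_F_DUMP */
    #include <stdio.h>

    int main(void)
    {
        unsigned int flags = NLM_F_ROOT;  /* only one of the two bits */

        /* loose test: true for a lone NLM_F_ROOT, so a non-dump request
         * could be misrouted into the dump path */
        printf("loose:  %d\n", (flags & NLM_F_DUMP) != 0);
        /* strict test used by the fix: both bits must be present */
        printf("strict: %d\n", (flags & NLM_F_DUMP) == NLM_F_DUMP);
        return 0;
    }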
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 45087052d894..5fdb07229017 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -426,7 +426,8 @@ static inline void dccp_update_gsr(struct sock *sk, u64 seq)
 {
     struct dccp_sock *dp = dccp_sk(sk);
 
-    dp->dccps_gsr = seq;
+    if (after48(seq, dp->dccps_gsr))
+        dp->dccps_gsr = seq;
     /* Sequence validity window depends on remote Sequence Window (7.5.1) */
     dp->dccps_swl = SUB48(ADD48(dp->dccps_gsr, 1), dp->dccps_r_seq_win / 4);
     /*
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 15af247ea007..8cde009e8b85 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -260,7 +260,7 @@ static int dccp_check_seqno(struct sock *sk, struct sk_buff *skb)
          */
         if (time_before(now, (dp->dccps_rate_last +
                       sysctl_dccp_sync_ratelimit)))
-            return 0;
+            return -1;
 
         DCCP_WARN("Step 6 failed for %s packet, "
               "(LSWL(%llu) <= P.seqno(%llu) <= S.SWH(%llu)) and "
diff --git a/net/dccp/sysctl.c b/net/dccp/sysctl.c
index 563943822e58..42348824ee31 100644
--- a/net/dccp/sysctl.c
+++ b/net/dccp/sysctl.c
@@ -21,7 +21,8 @@
 /* Boundary values */
 static int          zero     = 0,
                     u8_max   = 0xFF;
-static unsigned long seqw_min = 32;
+static unsigned long seqw_min = DCCPF_SEQ_WMIN,
+                     seqw_max = 0xFFFFFFFF; /* maximum on 32 bit */
 
 static struct ctl_table dccp_default_table[] = {
     {
@@ -31,6 +32,7 @@ static struct ctl_table dccp_default_table[] = {
         .mode         = 0644,
         .proc_handler = proc_doulongvec_minmax,
         .extra1       = &seqw_min,  /* RFC 4340, 7.5.2 */
+        .extra2       = &seqw_max,
     },
     {
         .procname     = "rx_ccid",
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index f00ef2f1d814..f9d7ac924f15 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -347,10 +347,11 @@ void ether_setup(struct net_device *dev)
 EXPORT_SYMBOL(ether_setup);
 
 /**
- * alloc_etherdev_mq - Allocates and sets up an Ethernet device
+ * alloc_etherdev_mqs - Allocates and sets up an Ethernet device
  * @sizeof_priv: Size of additional driver-private structure to be allocated
  *               for this Ethernet device
- * @queue_count: The number of queues this device has.
+ * @txqs: The number of TX queues this device has.
+ * @rxqs: The number of RX queues this device has.
  *
  * Fill in the fields of the device structure with Ethernet-generic
  * values. Basically does everything except registering the device.
@@ -360,11 +361,12 @@ EXPORT_SYMBOL(ether_setup);
  * this private data area.
  */
 
-struct net_device *alloc_etherdev_mq(int sizeof_priv, unsigned int queue_count)
+struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
+                                      unsigned int rxqs)
 {
-    return alloc_netdev_mq(sizeof_priv, "eth%d", ether_setup, queue_count);
+    return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
 }
-EXPORT_SYMBOL(alloc_etherdev_mq);
+EXPORT_SYMBOL(alloc_etherdev_mqs);
 
 static size_t _format_mac_addr(char *buf, int buflen,
                    const unsigned char *addr, int len)
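Callers can now size the TX and RX queue arrays independently. A hypothetical probe-path sketch (my_priv and my_probe are illustrative names, not from the patch):

    #include <linux/etherdevice.h>

    struct my_priv { int dummy; };

    static int my_probe(void)
    {
        /* 8 TX queues, 2 RX queues -- no longer forced to be equal */
        struct net_device *dev = alloc_etherdev_mqs(sizeof(struct my_priv), 8, 2);

        if (!dev)
            return -ENOMEM;
        /* ... ndo setup, register_netdev(dev), ... */
        return 0;
    }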
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index 880a5ec6dce0..86961bec70ab 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -314,14 +314,15 @@ static int ah_input(struct xfrm_state *x, struct sk_buff *skb)
 
     skb->ip_summed = CHECKSUM_NONE;
 
-    ah = (struct ip_auth_hdr *)skb->data;
-    iph = ip_hdr(skb);
-    ihl = ip_hdrlen(skb);
 
     if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
         goto out;
     nfrags = err;
 
+    ah = (struct ip_auth_hdr *)skb->data;
+    iph = ip_hdr(skb);
+    ihl = ip_hdrlen(skb);
+
     work_iph = ah_alloc_tmp(ahash, nfrags, ihl + ahp->icv_trunc_len);
     if (!work_iph)
         goto out;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index a2fc7b961dbc..04c8b69fd426 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1143,6 +1143,23 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
     return err;
 }
 
+int arp_invalidate(struct net_device *dev, __be32 ip)
+{
+    struct neighbour *neigh = neigh_lookup(&arp_tbl, &ip, dev);
+    int err = -ENXIO;
+
+    if (neigh) {
+        if (neigh->nud_state & ~NUD_NOARP)
+            err = neigh_update(neigh, NULL, NUD_FAILED,
+                       NEIGH_UPDATE_F_OVERRIDE|
+                       NEIGH_UPDATE_F_ADMIN);
+        neigh_release(neigh);
+    }
+
+    return err;
+}
+EXPORT_SYMBOL(arp_invalidate);
+
 static int arp_req_delete_public(struct net *net, struct arpreq *r,
                  struct net_device *dev)
 {
@@ -1163,7 +1180,6 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
 {
     int err;
     __be32 ip;
-    struct neighbour *neigh;
 
     if (r->arp_flags & ATF_PUBL)
         return arp_req_delete_public(net, r, dev);
@@ -1181,16 +1197,7 @@ static int arp_req_delete(struct net *net, struct arpreq *r,
         if (!dev)
             return -EINVAL;
     }
-    err = -ENXIO;
-    neigh = neigh_lookup(&arp_tbl, &ip, dev);
-    if (neigh) {
-        if (neigh->nud_state & ~NUD_NOARP)
-            err = neigh_update(neigh, NULL, NUD_FAILED,
-                       NEIGH_UPDATE_F_OVERRIDE|
-                       NEIGH_UPDATE_F_ADMIN);
-        neigh_release(neigh);
-    }
-    return err;
+    return arp_invalidate(dev, ip);
 }
 
 /*
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 25e318153f14..97e5fb765265 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -73,7 +73,7 @@ int inet_csk_bind_conflict(const struct sock *sk,
             !sk2->sk_bound_dev_if ||
             sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
             if (!reuse || !sk2->sk_reuse ||
-                sk2->sk_state == TCP_LISTEN) {
+                ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) {
                 const __be32 sk2_rcv_saddr = sk_rcv_saddr(sk2);
                 if (!sk2_rcv_saddr || !sk_rcv_saddr(sk) ||
                     sk2_rcv_saddr == sk_rcv_saddr(sk))
@@ -122,7 +122,8 @@ again:
                 (tb->num_owners < smallest_size || smallest_size == -1)) {
                 smallest_size = tb->num_owners;
                 smallest_rover = rover;
-                if (atomic_read(&hashinfo->bsockets) > (high - low) + 1) {
+                if (atomic_read(&hashinfo->bsockets) > (high - low) + 1 &&
+                    !inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb)) {
                     spin_unlock(&head->lock);
                     snum = smallest_rover;
                     goto have_snum;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 2ada17129fce..2746c1fa6417 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -858,7 +858,7 @@ static int inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
         nlmsg_len(nlh) < hdrlen)
         return -EINVAL;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP) {
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         if (nlmsg_attrlen(nlh, hdrlen)) {
             struct nlattr *attr;
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3fac340a28d5..e855fffaed95 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -710,42 +710,25 @@ static void get_counters(const struct xt_table_info *t,
     struct arpt_entry *iter;
     unsigned int cpu;
     unsigned int i;
-    unsigned int curcpu = get_cpu();
-
-    /* Instead of clearing (by a previous call to memset())
-     * the counters and using adds, we set the counters
-     * with data used by 'current' CPU
-     *
-     * Bottom half has to be disabled to prevent deadlock
-     * if new softirq were to run and call ipt_do_table
-     */
-    local_bh_disable();
-    i = 0;
-    xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-        SET_COUNTER(counters[i], iter->counters.bcnt,
-                iter->counters.pcnt);
-        ++i;
-    }
-    local_bh_enable();
-    /* Processing counters from other cpus, we can let bottom half enabled,
-     * (preemption is disabled)
-     */
 
     for_each_possible_cpu(cpu) {
-        if (cpu == curcpu)
-            continue;
+        seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
         i = 0;
-        local_bh_disable();
-        xt_info_wrlock(cpu);
         xt_entry_foreach(iter, t->entries[cpu], t->size) {
-            ADD_COUNTER(counters[i], iter->counters.bcnt,
-                    iter->counters.pcnt);
+            u64 bcnt, pcnt;
+            unsigned int start;
+
+            do {
+                start = read_seqbegin(lock);
+                bcnt = iter->counters.bcnt;
+                pcnt = iter->counters.pcnt;
+            } while (read_seqretry(lock, start));
+
+            ADD_COUNTER(counters[i], bcnt, pcnt);
             ++i;
         }
-        xt_info_wrunlock(cpu);
-        local_bh_enable();
     }
-    put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -759,7 +742,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
      * about).
      */
     countersize = sizeof(struct xt_counters) * private->number;
-    counters = vmalloc(countersize);
+    counters = vzalloc(countersize);
 
     if (counters == NULL)
         return ERR_PTR(-ENOMEM);
@@ -1007,7 +990,7 @@ static int __do_replace(struct net *net, const char *name,
     struct arpt_entry *iter;
 
     ret = 0;
-    counters = vmalloc(num_counters * sizeof(struct xt_counters));
+    counters = vzalloc(num_counters * sizeof(struct xt_counters));
     if (!counters) {
         ret = -ENOMEM;
         goto out;
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index a846d633b3b6..652efea013dc 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -884,42 +884,25 @@ get_counters(const struct xt_table_info *t,
     struct ipt_entry *iter;
     unsigned int cpu;
     unsigned int i;
-    unsigned int curcpu = get_cpu();
-
-    /* Instead of clearing (by a previous call to memset())
-     * the counters and using adds, we set the counters
-     * with data used by 'current' CPU.
-     *
-     * Bottom half has to be disabled to prevent deadlock
-     * if new softirq were to run and call ipt_do_table
-     */
-    local_bh_disable();
-    i = 0;
-    xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-        SET_COUNTER(counters[i], iter->counters.bcnt,
-                iter->counters.pcnt);
-        ++i;
-    }
-    local_bh_enable();
-    /* Processing counters from other cpus, we can let bottom half enabled,
-     * (preemption is disabled)
-     */
 
     for_each_possible_cpu(cpu) {
-        if (cpu == curcpu)
-            continue;
+        seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
         i = 0;
-        local_bh_disable();
-        xt_info_wrlock(cpu);
         xt_entry_foreach(iter, t->entries[cpu], t->size) {
-            ADD_COUNTER(counters[i], iter->counters.bcnt,
-                    iter->counters.pcnt);
+            u64 bcnt, pcnt;
+            unsigned int start;
+
+            do {
+                start = read_seqbegin(lock);
+                bcnt = iter->counters.bcnt;
+                pcnt = iter->counters.pcnt;
+            } while (read_seqretry(lock, start));
+
+            ADD_COUNTER(counters[i], bcnt, pcnt);
             ++i; /* macro does multi eval of i */
         }
-        xt_info_wrunlock(cpu);
-        local_bh_enable();
     }
-    put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -932,7 +915,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
        (other than comefrom, which userspace doesn't care
        about). */
     countersize = sizeof(struct xt_counters) * private->number;
-    counters = vmalloc(countersize);
+    counters = vzalloc(countersize);
 
     if (counters == NULL)
         return ERR_PTR(-ENOMEM);
@@ -1203,7 +1186,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
     struct ipt_entry *iter;
 
     ret = 0;
-    counters = vmalloc(num_counters * sizeof(struct xt_counters));
+    counters = vzalloc(num_counters * sizeof(struct xt_counters));
     if (!counters) {
         ret = -ENOMEM;
         goto out;
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index ee82d4ef26ce..1aba54ae53c4 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -538,14 +538,16 @@ static int ah6_input(struct xfrm_state *x, struct sk_buff *skb)
     if (!pskb_may_pull(skb, ah_hlen))
         goto out;
 
-    ip6h = ipv6_hdr(skb);
-
-    skb_push(skb, hdr_len);
 
     if ((err = skb_cow_data(skb, 0, &trailer)) < 0)
         goto out;
     nfrags = err;
 
+    ah = (struct ip_auth_hdr *)skb->data;
+    ip6h = ipv6_hdr(skb);
+
+    skb_push(skb, hdr_len);
+
     work_iph = ah_alloc_tmp(ahash, nfrags, hdr_len + ahp->icv_trunc_len);
     if (!work_iph)
         goto out;
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index e46305d1815a..d144e629d2b4 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -44,7 +44,7 @@ int inet6_csk_bind_conflict(const struct sock *sk,
              !sk2->sk_bound_dev_if ||
              sk->sk_bound_dev_if == sk2->sk_bound_dev_if) &&
             (!sk->sk_reuse || !sk2->sk_reuse ||
-             sk2->sk_state == TCP_LISTEN) &&
+             ((1 << sk2->sk_state) & (TCPF_LISTEN | TCPF_CLOSE))) &&
              ipv6_rcv_saddr_equal(sk, sk2))
             break;
     }
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 455582384ece..7d227c644f72 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -897,42 +897,25 @@ get_counters(const struct xt_table_info *t,
     struct ip6t_entry *iter;
     unsigned int cpu;
     unsigned int i;
-    unsigned int curcpu = get_cpu();
-
-    /* Instead of clearing (by a previous call to memset())
-     * the counters and using adds, we set the counters
-     * with data used by 'current' CPU
-     *
-     * Bottom half has to be disabled to prevent deadlock
-     * if new softirq were to run and call ipt_do_table
-     */
-    local_bh_disable();
-    i = 0;
-    xt_entry_foreach(iter, t->entries[curcpu], t->size) {
-        SET_COUNTER(counters[i], iter->counters.bcnt,
-                iter->counters.pcnt);
-        ++i;
-    }
-    local_bh_enable();
-    /* Processing counters from other cpus, we can let bottom half enabled,
-     * (preemption is disabled)
-     */
 
     for_each_possible_cpu(cpu) {
-        if (cpu == curcpu)
-            continue;
+        seqlock_t *lock = &per_cpu(xt_info_locks, cpu).lock;
+
         i = 0;
-        local_bh_disable();
-        xt_info_wrlock(cpu);
         xt_entry_foreach(iter, t->entries[cpu], t->size) {
-            ADD_COUNTER(counters[i], iter->counters.bcnt,
-                    iter->counters.pcnt);
+            u64 bcnt, pcnt;
+            unsigned int start;
+
+            do {
+                start = read_seqbegin(lock);
+                bcnt = iter->counters.bcnt;
+                pcnt = iter->counters.pcnt;
+            } while (read_seqretry(lock, start));
+
+            ADD_COUNTER(counters[i], bcnt, pcnt);
             ++i;
         }
-        xt_info_wrunlock(cpu);
-        local_bh_enable();
     }
-    put_cpu();
 }
 
 static struct xt_counters *alloc_counters(const struct xt_table *table)
@@ -945,7 +928,7 @@ static struct xt_counters *alloc_counters(const struct xt_table *table)
        (other than comefrom, which userspace doesn't care
        about). */
     countersize = sizeof(struct xt_counters) * private->number;
-    counters = vmalloc(countersize);
+    counters = vzalloc(countersize);
 
     if (counters == NULL)
         return ERR_PTR(-ENOMEM);
@@ -1216,7 +1199,7 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,
     struct ip6t_entry *iter;
 
     ret = 0;
-    counters = vmalloc(num_counters * sizeof(struct xt_counters));
+    counters = vzalloc(num_counters * sizeof(struct xt_counters));
     if (!counters) {
         ret = -ENOMEM;
         goto out;
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 0cdba50c0d69..5cb8d3027b18 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -645,25 +645,23 @@ ctnetlink_dump_table(struct sk_buff *skb, struct netlink_callback *cb)
     struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
     u_int8_t l3proto = nfmsg->nfgen_family;
 
-    rcu_read_lock();
+    spin_lock_bh(&nf_conntrack_lock);
     last = (struct nf_conn *)cb->args[1];
     for (; cb->args[0] < net->ct.htable_size; cb->args[0]++) {
 restart:
-        hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[cb->args[0]],
+        hlist_nulls_for_each_entry(h, n, &net->ct.hash[cb->args[0]],
                      hnnode) {
             if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
                 continue;
             ct = nf_ct_tuplehash_to_ctrack(h);
-            if (!atomic_inc_not_zero(&ct->ct_general.use))
-                continue;
             /* Dump entries of a given L3 protocol number.
              * If it is not specified, ie. l3proto == 0,
              * then dump everything. */
             if (l3proto && nf_ct_l3num(ct) != l3proto)
-                goto releasect;
+                continue;
             if (cb->args[1]) {
                 if (ct != last)
-                    goto releasect;
+                    continue;
                 cb->args[1] = 0;
             }
             if (ctnetlink_fill_info(skb, NETLINK_CB(cb->skb).pid,
@@ -681,8 +679,6 @@ restart:
                 if (acct)
                     memset(acct, 0, sizeof(struct nf_conn_counter[IP_CT_DIR_MAX]));
             }
-releasect:
-        nf_ct_put(ct);
         }
         if (cb->args[1]) {
             cb->args[1] = 0;
@@ -690,7 +686,7 @@ releasect:
         }
     }
 out:
-    rcu_read_unlock();
+    spin_unlock_bh(&nf_conntrack_lock);
     if (last)
         nf_ct_put(last);
 
@@ -928,7 +924,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
     u16 zone;
     int err;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP)
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
         return netlink_dump_start(ctnl, skb, nlh, ctnetlink_dump_table,
                       ctnetlink_done);
 
@@ -1790,7 +1786,7 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
     u16 zone;
     int err;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP) {
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         return netlink_dump_start(ctnl, skb, nlh,
                       ctnetlink_exp_dump_table,
                       ctnetlink_exp_done);
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 80463507420e..c94237631077 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -1325,7 +1325,8 @@ static int __init xt_init(void)
 
     for_each_possible_cpu(i) {
         struct xt_info_lock *lock = &per_cpu(xt_info_locks, i);
-        spin_lock_init(&lock->lock);
+
+        seqlock_init(&lock->lock);
         lock->readers = 0;
     }
 
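With the per-cpu lock now a seqlock, the get_counters() readers above retry instead of blocking bottom halves. The underlying idiom, sketched generically (not the kernel's actual xt_info_locks definition):

    #include <linux/seqlock.h>
    #include <linux/types.h>

    static DEFINE_SEQLOCK(stats_lock);
    static u64 bytes, packets;

    /* packet path: bump the sequence so concurrent readers retry */
    static void writer_update(u64 len)
    {
        write_seqlock(&stats_lock);
        bytes += len;
        packets++;
        write_sequnlock(&stats_lock);
    }

    /* counter dump: non-blocking snapshot, the pair stays consistent */
    static void reader_snapshot(u64 *b, u64 *p)
    {
        unsigned int start;

        do {
            start = read_seqbegin(&stats_lock);
            *b = bytes;
            *p = packets;
        } while (read_seqretry(&stats_lock, start));
    }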
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 1781d99145e2..f83cb370292b 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -519,7 +519,7 @@ static int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
         security_netlink_recv(skb, CAP_NET_ADMIN))
         return -EPERM;
 
-    if (nlh->nlmsg_flags & NLM_F_DUMP) {
+    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
         if (ops->dumpit == NULL)
             return -EOPNOTSUPP;
 
diff --git a/net/phonet/af_phonet.c b/net/phonet/af_phonet.c
index fd95beb72f5d..1072b2c19d31 100644
--- a/net/phonet/af_phonet.c
+++ b/net/phonet/af_phonet.c
@@ -37,7 +37,7 @@
 /* Transport protocol registration */
 static struct phonet_protocol *proto_tab[PHONET_NPROTO] __read_mostly;
 
-static struct phonet_protocol *phonet_proto_get(int protocol)
+static struct phonet_protocol *phonet_proto_get(unsigned int protocol)
 {
     struct phonet_protocol *pp;
 
@@ -458,7 +458,7 @@ static struct packet_type phonet_packet_type __read_mostly = {
 
 static DEFINE_MUTEX(proto_tab_lock);
 
-int __init_or_module phonet_proto_register(int protocol,
+int __init_or_module phonet_proto_register(unsigned int protocol,
         struct phonet_protocol *pp)
 {
     int err = 0;
@@ -481,7 +481,7 @@ int __init_or_module phonet_proto_register(int protocol,
 }
 EXPORT_SYMBOL(phonet_proto_register);
 
-void phonet_proto_unregister(int protocol, struct phonet_protocol *pp)
+void phonet_proto_unregister(unsigned int protocol, struct phonet_protocol *pp)
 {
     mutex_lock(&proto_tab_lock);
     BUG_ON(proto_tab[protocol] != pp);
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 67dc7ce9b63a..83ddfc07e45d 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -508,8 +508,7 @@ static int tcf_csum(struct sk_buff *skb,
 
     spin_lock(&p->tcf_lock);
     p->tcf_tm.lastuse = jiffies;
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
     action = p->tcf_action;
     update_flags = p->update_flags;
     spin_unlock(&p->tcf_lock);
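All of the act_*.c and sch_*.c hunks below replace the same open-coded pair of statistics updates with one helper. Presumably bstats_update() expands to something like the following sketch (see the gen_stats/sch_generic headers for the real definition):

    static inline void bstats_update_sketch(struct gnet_stats_basic_packed *b,
                                            struct sk_buff *skb)
    {
        b->bytes += qdisc_pkt_len(skb);
        /* GSO-aware: a segmented skb counts as gso_segs packets */
        b->packets += skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
    }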
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8daef9632255..c2a7c20e81c1 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -209,8 +209,7 @@ static int tcf_ipt(struct sk_buff *skb, struct tc_action *a,
     spin_lock(&ipt->tcf_lock);
 
     ipt->tcf_tm.lastuse = jiffies;
-    ipt->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    ipt->tcf_bstats.packets++;
+    bstats_update(&ipt->tcf_bstats, skb);
 
     /* yes, we have to worry about both in and out dev
        worry later - danger - this API seems to have changed
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 0c311be92827..d765067e99db 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -165,8 +165,7 @@ static int tcf_mirred(struct sk_buff *skb, struct tc_action *a,
 
     spin_lock(&m->tcf_lock);
     m->tcf_tm.lastuse = jiffies;
-    m->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    m->tcf_bstats.packets++;
+    bstats_update(&m->tcf_bstats, skb);
 
     dev = m->tcfm_dev;
     if (!dev) {
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 186eb837e600..178a4bd7b7cb 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -125,8 +125,7 @@ static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
     egress = p->flags & TCA_NAT_FLAG_EGRESS;
     action = p->tcf_action;
 
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
 
     spin_unlock(&p->tcf_lock);
 
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index a0593c9640db..445bef716f77 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -187,8 +187,7 @@ static int tcf_pedit(struct sk_buff *skb, struct tc_action *a,
 bad:
     p->tcf_qstats.overlimits++;
 done:
-    p->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    p->tcf_bstats.packets++;
+    bstats_update(&p->tcf_bstats, skb);
     spin_unlock(&p->tcf_lock);
     return p->tcf_action;
 }
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 7ebf7439b478..e2f08b1e2e58 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -298,8 +298,7 @@ static int tcf_act_police(struct sk_buff *skb, struct tc_action *a,
 
     spin_lock(&police->tcf_lock);
 
-    police->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    police->tcf_bstats.packets++;
+    bstats_update(&police->tcf_bstats, skb);
 
     if (police->tcfp_ewma_rate &&
         police->tcf_rate_est.bps >= police->tcfp_ewma_rate) {
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 97e84f3ee775..7287cff7af3e 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -42,8 +42,7 @@ static int tcf_simp(struct sk_buff *skb, struct tc_action *a, struct tcf_result
 
     spin_lock(&d->tcf_lock);
     d->tcf_tm.lastuse = jiffies;
-    d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    d->tcf_bstats.packets++;
+    bstats_update(&d->tcf_bstats, skb);
 
     /* print policy string followed by _ then packet count
      * Example if this was the 3rd packet and the string was "hello"
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 66cbf4eb8855..836f5fee9e58 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -46,8 +46,7 @@ static int tcf_skbedit(struct sk_buff *skb, struct tc_action *a,
 
     spin_lock(&d->tcf_lock);
     d->tcf_tm.lastuse = jiffies;
-    d->tcf_bstats.bytes += qdisc_pkt_len(skb);
-    d->tcf_bstats.packets++;
+    bstats_update(&d->tcf_bstats, skb);
 
     if (d->flags & SKBEDIT_F_PRIORITY)
         skb->priority = d->priority;
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 282540778aa8..943d733409d0 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -422,10 +422,8 @@ drop: __maybe_unused
         }
         return ret;
     }
-    sch->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
-    flow->bstats.bytes += qdisc_pkt_len(skb);
-    flow->bstats.packets++;
+    qdisc_bstats_update(sch, skb);
+    bstats_update(&flow->bstats, skb);
     /*
      * Okay, this may seem weird. We pretend we've dropped the packet if
      * it goes via ATM. The reason for this is that the outer qdisc
diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
index eb7631590865..c80d1c210c5d 100644
--- a/net/sched/sch_cbq.c
+++ b/net/sched/sch_cbq.c
@@ -390,8 +390,7 @@ cbq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     ret = qdisc_enqueue(skb, cl->q);
     if (ret == NET_XMIT_SUCCESS) {
         sch->q.qlen++;
-        sch->bstats.packets++;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
+        qdisc_bstats_update(sch, skb);
         cbq_mark_toplevel(q, cl);
         if (!cl->next_alive)
             cbq_activate_class(cl);
@@ -650,8 +649,7 @@ static int cbq_reshape_fail(struct sk_buff *skb, struct Qdisc *child)
     ret = qdisc_enqueue(skb, cl->q);
     if (ret == NET_XMIT_SUCCESS) {
         sch->q.qlen++;
-        sch->bstats.packets++;
-        sch->bstats.bytes += qdisc_pkt_len(skb);
+        qdisc_bstats_update(sch, skb);
         if (!cl->next_alive)
             cbq_activate_class(cl);
         return 0;
diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
index aa8b5313f8cf..de55e642eafc 100644
--- a/net/sched/sch_drr.c
+++ b/net/sched/sch_drr.c
@@ -351,7 +351,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 {
     struct drr_sched *q = qdisc_priv(sch);
     struct drr_class *cl;
-    unsigned int len;
     int err;
 
     cl = drr_classify(skb, sch, &err);
@@ -362,7 +361,6 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         return err;
     }
 
-    len = qdisc_pkt_len(skb);
     err = qdisc_enqueue(skb, cl->qdisc);
     if (unlikely(err != NET_XMIT_SUCCESS)) {
         if (net_xmit_drop_count(err)) {
@@ -377,10 +375,8 @@ static int drr_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         cl->deficit = cl->quantum;
     }
 
-    cl->bstats.packets++;
-    cl->bstats.bytes += len;
-    sch->bstats.packets++;
-    sch->bstats.bytes += len;
+    bstats_update(&cl->bstats, skb);
+    qdisc_bstats_update(sch, skb);
 
     sch->q.qlen++;
     return err;
diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
index 1d295d62bb5c..60f4bdd4408e 100644
--- a/net/sched/sch_dsmark.c
+++ b/net/sched/sch_dsmark.c
@@ -260,8 +260,7 @@ static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         return err;
     }
 
-    sch->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
 
     return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 069c62b7bb36..2e45791d4f6c 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1599,10 +1599,8 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch)
     if (cl->qdisc->q.qlen == 1)
         set_active(cl, qdisc_pkt_len(skb));
 
-    cl->bstats.packets++;
-    cl->bstats.bytes += qdisc_pkt_len(skb);
-    sch->bstats.packets++;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
+    bstats_update(&cl->bstats, skb);
+    qdisc_bstats_update(sch, skb);
     sch->q.qlen++;
 
     return NET_XMIT_SUCCESS;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 01b519d6c52d..984c1b0c6836 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -569,15 +569,12 @@ static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
         }
         return ret;
     } else {
-        cl->bstats.packets +=
-            skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-        cl->bstats.bytes += qdisc_pkt_len(skb);
+        bstats_update(&cl->bstats, skb);
         htb_activate(q, cl);
     }
 
     sch->q.qlen++;
-    sch->bstats.packets += skb_is_gso(skb)?skb_shinfo(skb)->gso_segs:1;
-    sch->bstats.bytes += qdisc_pkt_len(skb);
+    qdisc_bstats_update(sch, skb);
     return NET_XMIT_SUCCESS;
 }
 
@@ -648,12 +645,10 @@ static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
             htb_add_to_wait_tree(q, cl, diff);
         }
 
-        /* update byte stats except for leaves which are already updated */
-        if (cl->level) {
-            cl->bstats.bytes += bytes;
-            cl->bstats.packets += skb_is_gso(skb)?
-                    skb_shinfo(skb)->gso_segs:1;
-        }
+        /* update basic stats except for leaves which are already updated */
+        if (cl->level)
+            bstats_update(&cl->bstats, skb);
+
         cl = cl->parent;
     }
 }
diff --git a/net/sched/sch_ingress.c b/net/sched/sch_ingress.c
index f10e34a68445..bce1665239b8 100644
--- a/net/sched/sch_ingress.c
+++ b/net/sched/sch_ingress.c
@@ -63,8 +63,7 @@ static int ingress_enqueue(struct sk_buff *skb, struct Qdisc *sch)
63 63
64 result = tc_classify(skb, p->filter_list, &res); 64 result = tc_classify(skb, p->filter_list, &res);
65 65
66 sch->bstats.packets++; 66 qdisc_bstats_update(sch, skb);
67 sch->bstats.bytes += qdisc_pkt_len(skb);
68 switch (result) { 67 switch (result) {
69 case TC_ACT_SHOT: 68 case TC_ACT_SHOT:
70 result = TC_ACT_SHOT; 69 result = TC_ACT_SHOT;
diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
index 32690deab5d0..21f13da24763 100644
--- a/net/sched/sch_multiq.c
+++ b/net/sched/sch_multiq.c
@@ -83,8 +83,7 @@ multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
83 83
84 ret = qdisc_enqueue(skb, qdisc); 84 ret = qdisc_enqueue(skb, qdisc);
85 if (ret == NET_XMIT_SUCCESS) { 85 if (ret == NET_XMIT_SUCCESS) {
86 sch->bstats.bytes += qdisc_pkt_len(skb); 86 qdisc_bstats_update(sch, skb);
87 sch->bstats.packets++;
88 sch->q.qlen++; 87 sch->q.qlen++;
89 return NET_XMIT_SUCCESS; 88 return NET_XMIT_SUCCESS;
90 } 89 }
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index e5593c083a78..1c4bce863479 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -240,8 +240,7 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch)
240 240
241 if (likely(ret == NET_XMIT_SUCCESS)) { 241 if (likely(ret == NET_XMIT_SUCCESS)) {
242 sch->q.qlen++; 242 sch->q.qlen++;
243 sch->bstats.bytes += qdisc_pkt_len(skb); 243 qdisc_bstats_update(sch, skb);
244 sch->bstats.packets++;
245 } else if (net_xmit_drop_count(ret)) { 244 } else if (net_xmit_drop_count(ret)) {
246 sch->qstats.drops++; 245 sch->qstats.drops++;
247 } 246 }
@@ -477,8 +476,7 @@ static int tfifo_enqueue(struct sk_buff *nskb, struct Qdisc *sch)
477 __skb_queue_after(list, skb, nskb); 476 __skb_queue_after(list, skb, nskb);
478 477
479 sch->qstats.backlog += qdisc_pkt_len(nskb); 478 sch->qstats.backlog += qdisc_pkt_len(nskb);
480 sch->bstats.bytes += qdisc_pkt_len(nskb); 479 qdisc_bstats_update(sch, nskb);
481 sch->bstats.packets++;
482 480
483 return NET_XMIT_SUCCESS; 481 return NET_XMIT_SUCCESS;
484 } 482 }
diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
index b1c95bce33ce..966158d49dd1 100644
--- a/net/sched/sch_prio.c
+++ b/net/sched/sch_prio.c
@@ -84,8 +84,7 @@ prio_enqueue(struct sk_buff *skb, struct Qdisc *sch)
84 84
85 ret = qdisc_enqueue(skb, qdisc); 85 ret = qdisc_enqueue(skb, qdisc);
86 if (ret == NET_XMIT_SUCCESS) { 86 if (ret == NET_XMIT_SUCCESS) {
87 sch->bstats.bytes += qdisc_pkt_len(skb); 87 qdisc_bstats_update(sch, skb);
88 sch->bstats.packets++;
89 sch->q.qlen++; 88 sch->q.qlen++;
90 return NET_XMIT_SUCCESS; 89 return NET_XMIT_SUCCESS;
91 } 90 }
diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
index a67ba3c5a0cc..a6009c5a2c97 100644
--- a/net/sched/sch_red.c
+++ b/net/sched/sch_red.c
@@ -94,8 +94,7 @@ static int red_enqueue(struct sk_buff *skb, struct Qdisc* sch)
94 94
95 ret = qdisc_enqueue(skb, child); 95 ret = qdisc_enqueue(skb, child);
96 if (likely(ret == NET_XMIT_SUCCESS)) { 96 if (likely(ret == NET_XMIT_SUCCESS)) {
97 sch->bstats.bytes += qdisc_pkt_len(skb); 97 qdisc_bstats_update(sch, skb);
98 sch->bstats.packets++;
99 sch->q.qlen++; 98 sch->q.qlen++;
100 } else if (net_xmit_drop_count(ret)) { 99 } else if (net_xmit_drop_count(ret)) {
101 q->stats.pdrop++; 100 q->stats.pdrop++;
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index d54ac94066c2..239ec53a634d 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -403,8 +403,7 @@ sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
403 slot->allot = q->scaled_quantum; 403 slot->allot = q->scaled_quantum;
404 } 404 }
405 if (++sch->q.qlen <= q->limit) { 405 if (++sch->q.qlen <= q->limit) {
406 sch->bstats.bytes += qdisc_pkt_len(skb); 406 qdisc_bstats_update(sch, skb);
407 sch->bstats.packets++;
408 return NET_XMIT_SUCCESS; 407 return NET_XMIT_SUCCESS;
409 } 408 }
410 409
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 641a30d64635..77565e721811 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -134,8 +134,7 @@ static int tbf_enqueue(struct sk_buff *skb, struct Qdisc* sch)
134 } 134 }
135 135
136 sch->q.qlen++; 136 sch->q.qlen++;
137 sch->bstats.bytes += qdisc_pkt_len(skb); 137 qdisc_bstats_update(sch, skb);
138 sch->bstats.packets++;
139 return NET_XMIT_SUCCESS; 138 return NET_XMIT_SUCCESS;
140} 139}
141 140
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 106479a7c94a..af9360d1f6eb 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -83,8 +83,7 @@ teql_enqueue(struct sk_buff *skb, struct Qdisc* sch)
83 83
84 if (q->q.qlen < dev->tx_queue_len) { 84 if (q->q.qlen < dev->tx_queue_len) {
85 __skb_queue_tail(&q->q, skb); 85 __skb_queue_tail(&q->q, skb);
86 sch->bstats.bytes += qdisc_pkt_len(skb); 86 qdisc_bstats_update(sch, skb);
87 sch->bstats.packets++;
88 return NET_XMIT_SUCCESS; 87 return NET_XMIT_SUCCESS;
89 } 88 }
90 89
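
All of the sch_* hunks above replace the same open-coded byte/packet
accounting with one pair of helpers. A minimal sketch of what those
helpers are assumed to look like (the real definitions belong in
include/net/sch_generic.h; the bodies below are reconstructed from the
code they replace, not quoted from it):

    static inline void bstats_update(struct gnet_stats_basic_packed *bstats,
                                     const struct sk_buff *skb)
    {
            bstats->bytes += qdisc_pkt_len(skb);
            /* count a GSO skb as gso_segs packets, as sch_htb.c
             * previously did by hand */
            bstats->packets += skb_is_gso(skb) ?
                               skb_shinfo(skb)->gso_segs : 1;
    }

    static inline void qdisc_bstats_update(struct Qdisc *sch,
                                           const struct sk_buff *skb)
    {
            bstats_update(&sch->bstats, skb);
    }

One behavioural consequence worth noting: qdiscs such as sch_drr and
sch_sfq previously counted a GSO super-packet as one packet; with the
shared helper they inherit htb's gso_segs accounting. The drr hunk can
also drop its local len variable because the helper reads the packet
length from the skb itself.
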
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index afe67849269f..67e31276682a 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -563,8 +563,17 @@ rpcauth_checkverf(struct rpc_task *task, __be32 *p)
563 return cred->cr_ops->crvalidate(task, p); 563 return cred->cr_ops->crvalidate(task, p);
564} 564}
565 565
566static void rpcauth_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
567 __be32 *data, void *obj)
568{
569 struct xdr_stream xdr;
570
571 xdr_init_encode(&xdr, &rqstp->rq_snd_buf, data);
572 encode(rqstp, &xdr, obj);
573}
574
566int 575int
567rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp, 576rpcauth_wrap_req(struct rpc_task *task, kxdreproc_t encode, void *rqstp,
568 __be32 *data, void *obj) 577 __be32 *data, void *obj)
569{ 578{
570 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 579 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -574,11 +583,22 @@ rpcauth_wrap_req(struct rpc_task *task, kxdrproc_t encode, void *rqstp,
574 if (cred->cr_ops->crwrap_req) 583 if (cred->cr_ops->crwrap_req)
575 return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj); 584 return cred->cr_ops->crwrap_req(task, encode, rqstp, data, obj);
576 /* By default, we encode the arguments normally. */ 585 /* By default, we encode the arguments normally. */
577 return encode(rqstp, data, obj); 586 rpcauth_wrap_req_encode(encode, rqstp, data, obj);
587 return 0;
588}
589
590static int
591rpcauth_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
592 __be32 *data, void *obj)
593{
594 struct xdr_stream xdr;
595
596 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, data);
597 return decode(rqstp, &xdr, obj);
578} 598}
579 599
580int 600int
581rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp, 601rpcauth_unwrap_resp(struct rpc_task *task, kxdrdproc_t decode, void *rqstp,
582 __be32 *data, void *obj) 602 __be32 *data, void *obj)
583{ 603{
584 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 604 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
@@ -589,7 +609,7 @@ rpcauth_unwrap_resp(struct rpc_task *task, kxdrproc_t decode, void *rqstp,
589 return cred->cr_ops->crunwrap_resp(task, decode, rqstp, 609 return cred->cr_ops->crunwrap_resp(task, decode, rqstp,
590 data, obj); 610 data, obj);
591 /* By default, we decode the arguments normally. */ 611 /* By default, we decode the arguments normally. */
592 return decode(rqstp, data, obj); 612 return rpcauth_unwrap_req_decode(decode, rqstp, data, obj);
593} 613}
594 614
595int 615int
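
The signature churn here (and in auth_gss.c and clnt.c below) comes
from splitting the old bidirectional kxdrproc_t into direction-specific
types. A sketch of the assumed typedefs (the authoritative definitions
live in include/linux/sunrpc/xdr.h):

    /* Encoding into a preallocated, correctly sized send buffer is
     * assumed not to fail, so encoders return void; a decoder can
     * still hit a short or malformed reply, so it keeps an int
     * status. */
    typedef void (*kxdreproc_t)(void *rqstp, struct xdr_stream *xdr,
                                void *obj);
    typedef int  (*kxdrdproc_t)(void *rqstp, struct xdr_stream *xdr,
                                void *obj);

The rpcauth_wrap_req_encode()/rpcauth_unwrap_req_decode() helpers added
above are thin adapters: they build the xdr_stream from rq_snd_buf or
rq_rcv_buf so that callers still holding a raw __be32 pointer can drive
the stream-based procedures.
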
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 3835ce35e224..45dbf1521b9a 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1231,9 +1231,19 @@ out_bad:
1231 return NULL; 1231 return NULL;
1232} 1232}
1233 1233
1234static void gss_wrap_req_encode(kxdreproc_t encode, struct rpc_rqst *rqstp,
1235 __be32 *p, void *obj)
1236{
1237 struct xdr_stream xdr;
1238
1239 xdr_init_encode(&xdr, &rqstp->rq_snd_buf, p);
1240 encode(rqstp, &xdr, obj);
1241}
1242
1234static inline int 1243static inline int
1235gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1244gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1236 kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj) 1245 kxdreproc_t encode, struct rpc_rqst *rqstp,
1246 __be32 *p, void *obj)
1237{ 1247{
1238 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 1248 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1239 struct xdr_buf integ_buf; 1249 struct xdr_buf integ_buf;
@@ -1249,9 +1259,7 @@ gss_wrap_req_integ(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1249 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1259 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1250 *p++ = htonl(rqstp->rq_seqno); 1260 *p++ = htonl(rqstp->rq_seqno);
1251 1261
1252 status = encode(rqstp, p, obj); 1262 gss_wrap_req_encode(encode, rqstp, p, obj);
1253 if (status)
1254 return status;
1255 1263
1256 if (xdr_buf_subsegment(snd_buf, &integ_buf, 1264 if (xdr_buf_subsegment(snd_buf, &integ_buf,
1257 offset, snd_buf->len - offset)) 1265 offset, snd_buf->len - offset))
@@ -1325,7 +1333,8 @@ out:
1325 1333
1326static inline int 1334static inline int
1327gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx, 1335gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1328 kxdrproc_t encode, struct rpc_rqst *rqstp, __be32 *p, void *obj) 1336 kxdreproc_t encode, struct rpc_rqst *rqstp,
1337 __be32 *p, void *obj)
1329{ 1338{
1330 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf; 1339 struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
1331 u32 offset; 1340 u32 offset;
@@ -1342,9 +1351,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1342 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base; 1351 offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
1343 *p++ = htonl(rqstp->rq_seqno); 1352 *p++ = htonl(rqstp->rq_seqno);
1344 1353
1345 status = encode(rqstp, p, obj); 1354 gss_wrap_req_encode(encode, rqstp, p, obj);
1346 if (status)
1347 return status;
1348 1355
1349 status = alloc_enc_pages(rqstp); 1356 status = alloc_enc_pages(rqstp);
1350 if (status) 1357 if (status)
@@ -1394,7 +1401,7 @@ gss_wrap_req_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1394 1401
1395static int 1402static int
1396gss_wrap_req(struct rpc_task *task, 1403gss_wrap_req(struct rpc_task *task,
1397 kxdrproc_t encode, void *rqstp, __be32 *p, void *obj) 1404 kxdreproc_t encode, void *rqstp, __be32 *p, void *obj)
1398{ 1405{
1399 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1406 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1400 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1407 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1407,12 +1414,14 @@ gss_wrap_req(struct rpc_task *task,
1407 /* The spec seems a little ambiguous here, but I think that not 1414 /* The spec seems a little ambiguous here, but I think that not
1408 * wrapping context destruction requests makes the most sense. 1415 * wrapping context destruction requests makes the most sense.
1409 */ 1416 */
1410 status = encode(rqstp, p, obj); 1417 gss_wrap_req_encode(encode, rqstp, p, obj);
1418 status = 0;
1411 goto out; 1419 goto out;
1412 } 1420 }
1413 switch (gss_cred->gc_service) { 1421 switch (gss_cred->gc_service) {
1414 case RPC_GSS_SVC_NONE: 1422 case RPC_GSS_SVC_NONE:
1415 status = encode(rqstp, p, obj); 1423 gss_wrap_req_encode(encode, rqstp, p, obj);
1424 status = 0;
1416 break; 1425 break;
1417 case RPC_GSS_SVC_INTEGRITY: 1426 case RPC_GSS_SVC_INTEGRITY:
1418 status = gss_wrap_req_integ(cred, ctx, encode, 1427 status = gss_wrap_req_integ(cred, ctx, encode,
@@ -1494,10 +1503,19 @@ gss_unwrap_resp_priv(struct rpc_cred *cred, struct gss_cl_ctx *ctx,
1494 return 0; 1503 return 0;
1495} 1504}
1496 1505
1506static int
1507gss_unwrap_req_decode(kxdrdproc_t decode, struct rpc_rqst *rqstp,
1508 __be32 *p, void *obj)
1509{
1510 struct xdr_stream xdr;
1511
1512 xdr_init_decode(&xdr, &rqstp->rq_rcv_buf, p);
1513 return decode(rqstp, &xdr, obj);
1514}
1497 1515
1498static int 1516static int
1499gss_unwrap_resp(struct rpc_task *task, 1517gss_unwrap_resp(struct rpc_task *task,
1500 kxdrproc_t decode, void *rqstp, __be32 *p, void *obj) 1518 kxdrdproc_t decode, void *rqstp, __be32 *p, void *obj)
1501{ 1519{
1502 struct rpc_cred *cred = task->tk_rqstp->rq_cred; 1520 struct rpc_cred *cred = task->tk_rqstp->rq_cred;
1503 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, 1521 struct gss_cred *gss_cred = container_of(cred, struct gss_cred,
@@ -1528,7 +1546,7 @@ gss_unwrap_resp(struct rpc_task *task,
1528 cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp) 1546 cred->cr_auth->au_rslack = cred->cr_auth->au_verfsize + (p - savedp)
1529 + (savedlen - head->iov_len); 1547 + (savedlen - head->iov_len);
1530out_decode: 1548out_decode:
1531 status = decode(rqstp, p, obj); 1549 status = gss_unwrap_req_decode(decode, rqstp, p, obj);
1532out: 1550out:
1533 gss_put_ctx(ctx); 1551 gss_put_ctx(ctx);
1534 dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid, 1552 dprintk("RPC: %5u gss_unwrap_resp returning %d\n", task->tk_pid,
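
The integrity and privacy paths in this file rely on an ordering the
void encoders make explicit: the sequence number and call body are
written into rq_snd_buf first, and only afterwards is the span from
offset to the end of the buffer checksummed or encrypted, so MIC
computation and wrapping are the only failures left to propagate. A
condensed sketch of gss_wrap_req_integ()'s shape after this change
(the error value here is an assumption, not a quote):

    offset = (u8 *)p - (u8 *)snd_buf->head[0].iov_base;
    *p++ = htonl(rqstp->rq_seqno);
    gss_wrap_req_encode(encode, rqstp, p, obj);  /* void: cannot fail */
    if (xdr_buf_subsegment(snd_buf, &integ_buf,
                           offset, snd_buf->len - offset))
            return -EFAULT;  /* assumed errno */
    /* ... compute the MIC over integ_buf and append it ... */
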
diff --git a/net/sunrpc/bc_svc.c b/net/sunrpc/bc_svc.c
index 7dcfe0cc3500..1dd1a6890007 100644
--- a/net/sunrpc/bc_svc.c
+++ b/net/sunrpc/bc_svc.c
@@ -59,8 +59,8 @@ int bc_send(struct rpc_rqst *req)
59 ret = task->tk_status; 59 ret = task->tk_status;
60 rpc_put_task(task); 60 rpc_put_task(task);
61 } 61 }
62 return ret;
63 dprintk("RPC: bc_send ret= %d\n", ret); 62 dprintk("RPC: bc_send ret= %d\n", ret);
63 return ret;
64} 64}
65 65
66#endif /* CONFIG_NFS_V4_1 */ 66#endif /* CONFIG_NFS_V4_1 */
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 92ce94f5146b..57d344cf2256 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1095,7 +1095,7 @@ static void
1095rpc_xdr_encode(struct rpc_task *task) 1095rpc_xdr_encode(struct rpc_task *task)
1096{ 1096{
1097 struct rpc_rqst *req = task->tk_rqstp; 1097 struct rpc_rqst *req = task->tk_rqstp;
1098 kxdrproc_t encode; 1098 kxdreproc_t encode;
1099 __be32 *p; 1099 __be32 *p;
1100 1100
1101 dprint_status(task); 1101 dprint_status(task);
@@ -1535,7 +1535,7 @@ call_decode(struct rpc_task *task)
1535{ 1535{
1536 struct rpc_clnt *clnt = task->tk_client; 1536 struct rpc_clnt *clnt = task->tk_client;
1537 struct rpc_rqst *req = task->tk_rqstp; 1537 struct rpc_rqst *req = task->tk_rqstp;
1538 kxdrproc_t decode = task->tk_msg.rpc_proc->p_decode; 1538 kxdrdproc_t decode = task->tk_msg.rpc_proc->p_decode;
1539 __be32 *p; 1539 __be32 *p;
1540 1540
1541 dprintk("RPC: %5u call_decode (status %d)\n", 1541 dprintk("RPC: %5u call_decode (status %d)\n",
@@ -1776,12 +1776,11 @@ out_overflow:
1776 goto out_garbage; 1776 goto out_garbage;
1777} 1777}
1778 1778
1779static int rpcproc_encode_null(void *rqstp, __be32 *data, void *obj) 1779static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
1780{ 1780{
1781 return 0;
1782} 1781}
1783 1782
1784static int rpcproc_decode_null(void *rqstp, __be32 *data, void *obj) 1783static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj)
1785{ 1784{
1786 return 0; 1785 return 0;
1787} 1786}
@@ -1830,23 +1829,15 @@ static void rpc_show_task(const struct rpc_clnt *clnt,
1830 const struct rpc_task *task) 1829 const struct rpc_task *task)
1831{ 1830{
1832 const char *rpc_waitq = "none"; 1831 const char *rpc_waitq = "none";
1833 char *p, action[KSYM_SYMBOL_LEN];
1834 1832
1835 if (RPC_IS_QUEUED(task)) 1833 if (RPC_IS_QUEUED(task))
1836 rpc_waitq = rpc_qname(task->tk_waitqueue); 1834 rpc_waitq = rpc_qname(task->tk_waitqueue);
1837 1835
1838 /* map tk_action pointer to a function name; then trim off 1836 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n",
1839 * the "+0x0 [sunrpc]" */
1840 sprint_symbol(action, (unsigned long)task->tk_action);
1841 p = strchr(action, '+');
1842 if (p)
1843 *p = '\0';
1844
1845 printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%s q:%s\n",
1846 task->tk_pid, task->tk_flags, task->tk_status, 1837 task->tk_pid, task->tk_flags, task->tk_status,
1847 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops, 1838 clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops,
1848 clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task), 1839 clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task),
1849 action, rpc_waitq); 1840 task->tk_action, rpc_waitq);
1850} 1841}
1851 1842
1852void rpc_show_tasks(void) 1843void rpc_show_tasks(void)
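
rpc_show_task() previously ran sprint_symbol() into a KSYM_SYMBOL_LEN
scratch buffer and trimmed the "+0x0/0x... [module]" suffix by hand;
the %ps printk conversion performs the same symbol lookup and prints
only the bare name. A one-line illustration (printk-only; userspace
printf has no %ps):

    /* prints e.g. "a:call_start" for an RPC state-machine pointer */
    printk(KERN_INFO "a:%ps\n", task->tk_action);
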
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 09f01f41e55a..72bc53683965 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -474,7 +474,7 @@ static int __rpc_create_common(struct inode *dir, struct dentry *dentry,
474{ 474{
475 struct inode *inode; 475 struct inode *inode;
476 476
477 BUG_ON(!d_unhashed(dentry)); 477 d_drop(dentry);
478 inode = rpc_get_inode(dir->i_sb, mode); 478 inode = rpc_get_inode(dir->i_sb, mode);
479 if (!inode) 479 if (!inode)
480 goto out_err; 480 goto out_err;
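
The rpc_pipe.c hunk swaps an assertion for the operation it asserted:
d_drop() unhashes the dentry and is safe if it is already unhashed, so
__rpc_create_common() now establishes the invariant itself instead of
BUG()ing when a caller passes a hashed dentry. Roughly:

    /* before: BUG_ON(!d_unhashed(dentry));   crash if hashed
     * after:  d_drop(dentry);                unhash if needed */
    d_drop(dentry);
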
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index fa6d7ca2c851..c652e4cc9fe9 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -57,10 +57,6 @@ enum {
57 RPCBPROC_GETSTAT, 57 RPCBPROC_GETSTAT,
58}; 58};
59 59
60#define RPCB_HIGHPROC_2 RPCBPROC_CALLIT
61#define RPCB_HIGHPROC_3 RPCBPROC_TADDR2UADDR
62#define RPCB_HIGHPROC_4 RPCBPROC_GETSTAT
63
64/* 60/*
65 * r_owner 61 * r_owner
66 * 62 *
@@ -693,46 +689,37 @@ static void rpcb_getport_done(struct rpc_task *child, void *data)
693 * XDR functions for rpcbind 689 * XDR functions for rpcbind
694 */ 690 */
695 691
696static int rpcb_enc_mapping(struct rpc_rqst *req, __be32 *p, 692static void rpcb_enc_mapping(struct rpc_rqst *req, struct xdr_stream *xdr,
697 const struct rpcbind_args *rpcb) 693 const struct rpcbind_args *rpcb)
698{ 694{
699 struct rpc_task *task = req->rq_task; 695 struct rpc_task *task = req->rq_task;
700 struct xdr_stream xdr; 696 __be32 *p;
701 697
702 dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n", 698 dprintk("RPC: %5u encoding PMAP_%s call (%u, %u, %d, %u)\n",
703 task->tk_pid, task->tk_msg.rpc_proc->p_name, 699 task->tk_pid, task->tk_msg.rpc_proc->p_name,
704 rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port); 700 rpcb->r_prog, rpcb->r_vers, rpcb->r_prot, rpcb->r_port);
705 701
706 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 702 p = xdr_reserve_space(xdr, RPCB_mappingargs_sz << 2);
707 703 *p++ = cpu_to_be32(rpcb->r_prog);
708 p = xdr_reserve_space(&xdr, sizeof(__be32) * RPCB_mappingargs_sz); 704 *p++ = cpu_to_be32(rpcb->r_vers);
709 if (unlikely(p == NULL)) 705 *p++ = cpu_to_be32(rpcb->r_prot);
710 return -EIO; 706 *p = cpu_to_be32(rpcb->r_port);
711
712 *p++ = htonl(rpcb->r_prog);
713 *p++ = htonl(rpcb->r_vers);
714 *p++ = htonl(rpcb->r_prot);
715 *p = htonl(rpcb->r_port);
716
717 return 0;
718} 707}
719 708
720static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p, 709static int rpcb_dec_getport(struct rpc_rqst *req, struct xdr_stream *xdr,
721 struct rpcbind_args *rpcb) 710 struct rpcbind_args *rpcb)
722{ 711{
723 struct rpc_task *task = req->rq_task; 712 struct rpc_task *task = req->rq_task;
724 struct xdr_stream xdr;
725 unsigned long port; 713 unsigned long port;
726 714 __be32 *p;
727 xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
728 715
729 rpcb->r_port = 0; 716 rpcb->r_port = 0;
730 717
731 p = xdr_inline_decode(&xdr, sizeof(__be32)); 718 p = xdr_inline_decode(xdr, 4);
732 if (unlikely(p == NULL)) 719 if (unlikely(p == NULL))
733 return -EIO; 720 return -EIO;
734 721
735 port = ntohl(*p); 722 port = be32_to_cpup(p);
736 dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid, 723 dprintk("RPC: %5u PMAP_%s result: %lu\n", task->tk_pid,
737 task->tk_msg.rpc_proc->p_name, port); 724 task->tk_msg.rpc_proc->p_name, port);
738 if (unlikely(port > USHRT_MAX)) 725 if (unlikely(port > USHRT_MAX))
@@ -742,20 +729,18 @@ static int rpcb_dec_getport(struct rpc_rqst *req, __be32 *p,
742 return 0; 729 return 0;
743} 730}
744 731
745static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p, 732static int rpcb_dec_set(struct rpc_rqst *req, struct xdr_stream *xdr,
746 unsigned int *boolp) 733 unsigned int *boolp)
747{ 734{
748 struct rpc_task *task = req->rq_task; 735 struct rpc_task *task = req->rq_task;
749 struct xdr_stream xdr; 736 __be32 *p;
750
751 xdr_init_decode(&xdr, &req->rq_rcv_buf, p);
752 737
753 p = xdr_inline_decode(&xdr, sizeof(__be32)); 738 p = xdr_inline_decode(xdr, 4);
754 if (unlikely(p == NULL)) 739 if (unlikely(p == NULL))
755 return -EIO; 740 return -EIO;
756 741
757 *boolp = 0; 742 *boolp = 0;
758 if (*p) 743 if (*p != xdr_zero)
759 *boolp = 1; 744 *boolp = 1;
760 745
761 dprintk("RPC: %5u RPCB_%s call %s\n", 746 dprintk("RPC: %5u RPCB_%s call %s\n",
@@ -764,73 +749,53 @@ static int rpcb_dec_set(struct rpc_rqst *req, __be32 *p,
764 return 0; 749 return 0;
765} 750}
766 751
767static int encode_rpcb_string(struct xdr_stream *xdr, const char *string, 752static void encode_rpcb_string(struct xdr_stream *xdr, const char *string,
768 const u32 maxstrlen) 753 const u32 maxstrlen)
769{ 754{
770 u32 len;
771 __be32 *p; 755 __be32 *p;
756 u32 len;
772 757
773 if (unlikely(string == NULL))
774 return -EIO;
775 len = strlen(string); 758 len = strlen(string);
776 if (unlikely(len > maxstrlen)) 759 BUG_ON(len > maxstrlen);
777 return -EIO; 760 p = xdr_reserve_space(xdr, 4 + len);
778
779 p = xdr_reserve_space(xdr, sizeof(__be32) + len);
780 if (unlikely(p == NULL))
781 return -EIO;
782 xdr_encode_opaque(p, string, len); 761 xdr_encode_opaque(p, string, len);
783
784 return 0;
785} 762}
786 763
787static int rpcb_enc_getaddr(struct rpc_rqst *req, __be32 *p, 764static void rpcb_enc_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
788 const struct rpcbind_args *rpcb) 765 const struct rpcbind_args *rpcb)
789{ 766{
790 struct rpc_task *task = req->rq_task; 767 struct rpc_task *task = req->rq_task;
791 struct xdr_stream xdr; 768 __be32 *p;
792 769
793 dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n", 770 dprintk("RPC: %5u encoding RPCB_%s call (%u, %u, '%s', '%s')\n",
794 task->tk_pid, task->tk_msg.rpc_proc->p_name, 771 task->tk_pid, task->tk_msg.rpc_proc->p_name,
795 rpcb->r_prog, rpcb->r_vers, 772 rpcb->r_prog, rpcb->r_vers,
796 rpcb->r_netid, rpcb->r_addr); 773 rpcb->r_netid, rpcb->r_addr);
797 774
798 xdr_init_encode(&xdr, &req->rq_snd_buf, p); 775 p = xdr_reserve_space(xdr, (RPCB_program_sz + RPCB_version_sz) << 2);
799 776 *p++ = cpu_to_be32(rpcb->r_prog);
800 p = xdr_reserve_space(&xdr, 777 *p = cpu_to_be32(rpcb->r_vers);
801 sizeof(__be32) * (RPCB_program_sz + RPCB_version_sz));
802 if (unlikely(p == NULL))
803 return -EIO;
804 *p++ = htonl(rpcb->r_prog);
805 *p = htonl(rpcb->r_vers);
806
807 if (encode_rpcb_string(&xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN))
808 return -EIO;
809 if (encode_rpcb_string(&xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN))
810 return -EIO;
811 if (encode_rpcb_string(&xdr, rpcb->r_owner, RPCB_MAXOWNERLEN))
812 return -EIO;
813 778
814 return 0; 779 encode_rpcb_string(xdr, rpcb->r_netid, RPCBIND_MAXNETIDLEN);
780 encode_rpcb_string(xdr, rpcb->r_addr, RPCBIND_MAXUADDRLEN);
781 encode_rpcb_string(xdr, rpcb->r_owner, RPCB_MAXOWNERLEN);
815} 782}
816 783
817static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p, 784static int rpcb_dec_getaddr(struct rpc_rqst *req, struct xdr_stream *xdr,
818 struct rpcbind_args *rpcb) 785 struct rpcbind_args *rpcb)
819{ 786{
820 struct sockaddr_storage address; 787 struct sockaddr_storage address;
821 struct sockaddr *sap = (struct sockaddr *)&address; 788 struct sockaddr *sap = (struct sockaddr *)&address;
822 struct rpc_task *task = req->rq_task; 789 struct rpc_task *task = req->rq_task;
823 struct xdr_stream xdr; 790 __be32 *p;
824 u32 len; 791 u32 len;
825 792
826 rpcb->r_port = 0; 793 rpcb->r_port = 0;
827 794
828 xdr_init_decode(&xdr, &req->rq_rcv_buf, p); 795 p = xdr_inline_decode(xdr, 4);
829
830 p = xdr_inline_decode(&xdr, sizeof(__be32));
831 if (unlikely(p == NULL)) 796 if (unlikely(p == NULL))
832 goto out_fail; 797 goto out_fail;
833 len = ntohl(*p); 798 len = be32_to_cpup(p);
834 799
835 /* 800 /*
836 * If the returned universal address is a null string, 801 * If the returned universal address is a null string,
@@ -845,7 +810,7 @@ static int rpcb_dec_getaddr(struct rpc_rqst *req, __be32 *p,
845 if (unlikely(len > RPCBIND_MAXUADDRLEN)) 810 if (unlikely(len > RPCBIND_MAXUADDRLEN))
846 goto out_fail; 811 goto out_fail;
847 812
848 p = xdr_inline_decode(&xdr, len); 813 p = xdr_inline_decode(xdr, len);
849 if (unlikely(p == NULL)) 814 if (unlikely(p == NULL))
850 goto out_fail; 815 goto out_fail;
851 dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid, 816 dprintk("RPC: %5u RPCB_%s reply: %s\n", task->tk_pid,
@@ -871,8 +836,8 @@ out_fail:
871static struct rpc_procinfo rpcb_procedures2[] = { 836static struct rpc_procinfo rpcb_procedures2[] = {
872 [RPCBPROC_SET] = { 837 [RPCBPROC_SET] = {
873 .p_proc = RPCBPROC_SET, 838 .p_proc = RPCBPROC_SET,
874 .p_encode = (kxdrproc_t)rpcb_enc_mapping, 839 .p_encode = (kxdreproc_t)rpcb_enc_mapping,
875 .p_decode = (kxdrproc_t)rpcb_dec_set, 840 .p_decode = (kxdrdproc_t)rpcb_dec_set,
876 .p_arglen = RPCB_mappingargs_sz, 841 .p_arglen = RPCB_mappingargs_sz,
877 .p_replen = RPCB_setres_sz, 842 .p_replen = RPCB_setres_sz,
878 .p_statidx = RPCBPROC_SET, 843 .p_statidx = RPCBPROC_SET,
@@ -881,8 +846,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
881 }, 846 },
882 [RPCBPROC_UNSET] = { 847 [RPCBPROC_UNSET] = {
883 .p_proc = RPCBPROC_UNSET, 848 .p_proc = RPCBPROC_UNSET,
884 .p_encode = (kxdrproc_t)rpcb_enc_mapping, 849 .p_encode = (kxdreproc_t)rpcb_enc_mapping,
885 .p_decode = (kxdrproc_t)rpcb_dec_set, 850 .p_decode = (kxdrdproc_t)rpcb_dec_set,
886 .p_arglen = RPCB_mappingargs_sz, 851 .p_arglen = RPCB_mappingargs_sz,
887 .p_replen = RPCB_setres_sz, 852 .p_replen = RPCB_setres_sz,
888 .p_statidx = RPCBPROC_UNSET, 853 .p_statidx = RPCBPROC_UNSET,
@@ -891,8 +856,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
891 }, 856 },
892 [RPCBPROC_GETPORT] = { 857 [RPCBPROC_GETPORT] = {
893 .p_proc = RPCBPROC_GETPORT, 858 .p_proc = RPCBPROC_GETPORT,
894 .p_encode = (kxdrproc_t)rpcb_enc_mapping, 859 .p_encode = (kxdreproc_t)rpcb_enc_mapping,
895 .p_decode = (kxdrproc_t)rpcb_dec_getport, 860 .p_decode = (kxdrdproc_t)rpcb_dec_getport,
896 .p_arglen = RPCB_mappingargs_sz, 861 .p_arglen = RPCB_mappingargs_sz,
897 .p_replen = RPCB_getportres_sz, 862 .p_replen = RPCB_getportres_sz,
898 .p_statidx = RPCBPROC_GETPORT, 863 .p_statidx = RPCBPROC_GETPORT,
@@ -904,8 +869,8 @@ static struct rpc_procinfo rpcb_procedures2[] = {
904static struct rpc_procinfo rpcb_procedures3[] = { 869static struct rpc_procinfo rpcb_procedures3[] = {
905 [RPCBPROC_SET] = { 870 [RPCBPROC_SET] = {
906 .p_proc = RPCBPROC_SET, 871 .p_proc = RPCBPROC_SET,
907 .p_encode = (kxdrproc_t)rpcb_enc_getaddr, 872 .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
908 .p_decode = (kxdrproc_t)rpcb_dec_set, 873 .p_decode = (kxdrdproc_t)rpcb_dec_set,
909 .p_arglen = RPCB_getaddrargs_sz, 874 .p_arglen = RPCB_getaddrargs_sz,
910 .p_replen = RPCB_setres_sz, 875 .p_replen = RPCB_setres_sz,
911 .p_statidx = RPCBPROC_SET, 876 .p_statidx = RPCBPROC_SET,
@@ -914,8 +879,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
914 }, 879 },
915 [RPCBPROC_UNSET] = { 880 [RPCBPROC_UNSET] = {
916 .p_proc = RPCBPROC_UNSET, 881 .p_proc = RPCBPROC_UNSET,
917 .p_encode = (kxdrproc_t)rpcb_enc_getaddr, 882 .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
918 .p_decode = (kxdrproc_t)rpcb_dec_set, 883 .p_decode = (kxdrdproc_t)rpcb_dec_set,
919 .p_arglen = RPCB_getaddrargs_sz, 884 .p_arglen = RPCB_getaddrargs_sz,
920 .p_replen = RPCB_setres_sz, 885 .p_replen = RPCB_setres_sz,
921 .p_statidx = RPCBPROC_UNSET, 886 .p_statidx = RPCBPROC_UNSET,
@@ -924,8 +889,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
924 }, 889 },
925 [RPCBPROC_GETADDR] = { 890 [RPCBPROC_GETADDR] = {
926 .p_proc = RPCBPROC_GETADDR, 891 .p_proc = RPCBPROC_GETADDR,
927 .p_encode = (kxdrproc_t)rpcb_enc_getaddr, 892 .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
928 .p_decode = (kxdrproc_t)rpcb_dec_getaddr, 893 .p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
929 .p_arglen = RPCB_getaddrargs_sz, 894 .p_arglen = RPCB_getaddrargs_sz,
930 .p_replen = RPCB_getaddrres_sz, 895 .p_replen = RPCB_getaddrres_sz,
931 .p_statidx = RPCBPROC_GETADDR, 896 .p_statidx = RPCBPROC_GETADDR,
@@ -937,8 +902,8 @@ static struct rpc_procinfo rpcb_procedures3[] = {
937static struct rpc_procinfo rpcb_procedures4[] = { 902static struct rpc_procinfo rpcb_procedures4[] = {
938 [RPCBPROC_SET] = { 903 [RPCBPROC_SET] = {
939 .p_proc = RPCBPROC_SET, 904 .p_proc = RPCBPROC_SET,
940 .p_encode = (kxdrproc_t)rpcb_enc_getaddr, 905 .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
941 .p_decode = (kxdrproc_t)rpcb_dec_set, 906 .p_decode = (kxdrdproc_t)rpcb_dec_set,
942 .p_arglen = RPCB_getaddrargs_sz, 907 .p_arglen = RPCB_getaddrargs_sz,
943 .p_replen = RPCB_setres_sz, 908 .p_replen = RPCB_setres_sz,
944 .p_statidx = RPCBPROC_SET, 909 .p_statidx = RPCBPROC_SET,
@@ -947,8 +912,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
947 }, 912 },
948 [RPCBPROC_UNSET] = { 913 [RPCBPROC_UNSET] = {
949 .p_proc = RPCBPROC_UNSET, 914 .p_proc = RPCBPROC_UNSET,
950 .p_encode = (kxdrproc_t)rpcb_enc_getaddr, 915 .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
951 .p_decode = (kxdrproc_t)rpcb_dec_set, 916 .p_decode = (kxdrdproc_t)rpcb_dec_set,
952 .p_arglen = RPCB_getaddrargs_sz, 917 .p_arglen = RPCB_getaddrargs_sz,
953 .p_replen = RPCB_setres_sz, 918 .p_replen = RPCB_setres_sz,
954 .p_statidx = RPCBPROC_UNSET, 919 .p_statidx = RPCBPROC_UNSET,
@@ -957,8 +922,8 @@ static struct rpc_procinfo rpcb_procedures4[] = {
957 }, 922 },
958 [RPCBPROC_GETADDR] = { 923 [RPCBPROC_GETADDR] = {
959 .p_proc = RPCBPROC_GETADDR, 924 .p_proc = RPCBPROC_GETADDR,
960 .p_encode = (kxdrproc_t)rpcb_enc_getaddr, 925 .p_encode = (kxdreproc_t)rpcb_enc_getaddr,
961 .p_decode = (kxdrproc_t)rpcb_dec_getaddr, 926 .p_decode = (kxdrdproc_t)rpcb_dec_getaddr,
962 .p_arglen = RPCB_getaddrargs_sz, 927 .p_arglen = RPCB_getaddrargs_sz,
963 .p_replen = RPCB_getaddrres_sz, 928 .p_replen = RPCB_getaddrres_sz,
964 .p_statidx = RPCBPROC_GETADDR, 929 .p_statidx = RPCBPROC_GETADDR,
@@ -993,19 +958,19 @@ static struct rpcb_info rpcb_next_version6[] = {
993 958
994static struct rpc_version rpcb_version2 = { 959static struct rpc_version rpcb_version2 = {
995 .number = RPCBVERS_2, 960 .number = RPCBVERS_2,
996 .nrprocs = RPCB_HIGHPROC_2, 961 .nrprocs = ARRAY_SIZE(rpcb_procedures2),
997 .procs = rpcb_procedures2 962 .procs = rpcb_procedures2
998}; 963};
999 964
1000static struct rpc_version rpcb_version3 = { 965static struct rpc_version rpcb_version3 = {
1001 .number = RPCBVERS_3, 966 .number = RPCBVERS_3,
1002 .nrprocs = RPCB_HIGHPROC_3, 967 .nrprocs = ARRAY_SIZE(rpcb_procedures3),
1003 .procs = rpcb_procedures3 968 .procs = rpcb_procedures3
1004}; 969};
1005 970
1006static struct rpc_version rpcb_version4 = { 971static struct rpc_version rpcb_version4 = {
1007 .number = RPCBVERS_4, 972 .number = RPCBVERS_4,
1008 .nrprocs = RPCB_HIGHPROC_4, 973 .nrprocs = ARRAY_SIZE(rpcb_procedures4),
1009 .procs = rpcb_procedures4 974 .procs = rpcb_procedures4
1010}; 975};
1011 976
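
The deleted RPCB_HIGHPROC_* constants named the highest procedure
number, not the table length. All three rpcb_procedures*[] tables use
designated initializers and only populate indices up to
RPCBPROC_GETPORT/RPCBPROC_GETADDR (3), so each contains four elements,
while e.g. RPCB_HIGHPROC_2 (== RPCBPROC_CALLIT == 5) claimed more slots
than exist and let a version-table lookup index past the end of the
array. ARRAY_SIZE() derives the true count from the array itself:

    /* the usual kernel definition, for reference */
    #define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

    /* ARRAY_SIZE(rpcb_procedures2) == 4: indices 0..3, with
     * slot 0 (RPCBPROC_NULL) left zero-initialized */
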
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 6359c42c4941..0e659c665a8d 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -488,10 +488,6 @@ svc_destroy(struct svc_serv *serv)
488 if (svc_serv_is_pooled(serv)) 488 if (svc_serv_is_pooled(serv))
489 svc_pool_map_put(); 489 svc_pool_map_put();
490 490
491#if defined(CONFIG_NFS_V4_1)
492 svc_sock_destroy(serv->bc_xprt);
493#endif /* CONFIG_NFS_V4_1 */
494
495 svc_unregister(serv); 491 svc_unregister(serv);
496 kfree(serv->sv_pools); 492 kfree(serv->sv_pools);
497 kfree(serv); 493 kfree(serv);
@@ -1147,7 +1143,6 @@ svc_process_common(struct svc_rqst *rqstp, struct kvec *argv, struct kvec *resv)
1147 dropit: 1143 dropit:
1148 svc_authorise(rqstp); /* doesn't hurt to call this twice */ 1144 svc_authorise(rqstp); /* doesn't hurt to call this twice */
1149 dprintk("svc: svc_process dropit\n"); 1145 dprintk("svc: svc_process dropit\n");
1150 svc_drop(rqstp);
1151 return 0; 1146 return 0;
1152 1147
1153err_short_len: 1148err_short_len:
@@ -1218,7 +1213,6 @@ svc_process(struct svc_rqst *rqstp)
1218 struct kvec *resv = &rqstp->rq_res.head[0]; 1213 struct kvec *resv = &rqstp->rq_res.head[0];
1219 struct svc_serv *serv = rqstp->rq_server; 1214 struct svc_serv *serv = rqstp->rq_server;
1220 u32 dir; 1215 u32 dir;
1221 int error;
1222 1216
1223 /* 1217 /*
1224 * Setup response xdr_buf. 1218 * Setup response xdr_buf.
@@ -1246,11 +1240,13 @@ svc_process(struct svc_rqst *rqstp)
1246 return 0; 1240 return 0;
1247 } 1241 }
1248 1242
1249 error = svc_process_common(rqstp, argv, resv); 1243 /* Returns 1 for send, 0 for drop */
1250 if (error <= 0) 1244 if (svc_process_common(rqstp, argv, resv))
1251 return error; 1245 return svc_send(rqstp);
1252 1246 else {
1253 return svc_send(rqstp); 1247 svc_drop(rqstp);
1248 return 0;
1249 }
1254} 1250}
1255 1251
1256#if defined(CONFIG_NFS_V4_1) 1252#if defined(CONFIG_NFS_V4_1)
@@ -1264,10 +1260,9 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
1264{ 1260{
1265 struct kvec *argv = &rqstp->rq_arg.head[0]; 1261 struct kvec *argv = &rqstp->rq_arg.head[0];
1266 struct kvec *resv = &rqstp->rq_res.head[0]; 1262 struct kvec *resv = &rqstp->rq_res.head[0];
1267 int error;
1268 1263
1269 /* Build the svc_rqst used by the common processing routine */ 1264 /* Build the svc_rqst used by the common processing routine */
1270 rqstp->rq_xprt = serv->bc_xprt; 1265 rqstp->rq_xprt = serv->sv_bc_xprt;
1271 rqstp->rq_xid = req->rq_xid; 1266 rqstp->rq_xid = req->rq_xid;
1272 rqstp->rq_prot = req->rq_xprt->prot; 1267 rqstp->rq_prot = req->rq_xprt->prot;
1273 rqstp->rq_server = serv; 1268 rqstp->rq_server = serv;
@@ -1292,12 +1287,15 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
1292 svc_getu32(argv); /* XID */ 1287 svc_getu32(argv); /* XID */
1293 svc_getnl(argv); /* CALLDIR */ 1288 svc_getnl(argv); /* CALLDIR */
1294 1289
1295 error = svc_process_common(rqstp, argv, resv); 1290 /* Returns 1 for send, 0 for drop */
1296 if (error <= 0) 1291 if (svc_process_common(rqstp, argv, resv)) {
1297 return error; 1292 memcpy(&req->rq_snd_buf, &rqstp->rq_res,
1298 1293 sizeof(req->rq_snd_buf));
1299 memcpy(&req->rq_snd_buf, &rqstp->rq_res, sizeof(req->rq_snd_buf)); 1294 return bc_send(req);
1300 return bc_send(req); 1295 } else {
1296 /* Nothing to do to drop request */
1297 return 0;
1298 }
1301} 1299}
1302EXPORT_SYMBOL(bc_svc_process); 1300EXPORT_SYMBOL(bc_svc_process);
1303#endif /* CONFIG_NFS_V4_1 */ 1301#endif /* CONFIG_NFS_V4_1 */
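
svc_process_common() no longer calls svc_drop() itself and no longer
returns a negative status: it is now a plain predicate, and each caller
supplies the transport-appropriate completion. The forechannel replies
with svc_send(); the backchannel copies rq_res into the original
rpc_rqst and hands it to bc_send(), and has nothing to release on a
drop. Condensed, the caller pattern is:

    /* Returns 1 for send, 0 for drop */
    if (svc_process_common(rqstp, argv, resv))
            return svc_send(rqstp);  /* bc_send() on the backchannel */
    svc_drop(rqstp);                 /* forechannel only */
    return 0;
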
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 07919e16be3e..d265aa700bb3 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -66,6 +66,13 @@ static void svc_sock_free(struct svc_xprt *);
66static struct svc_xprt *svc_create_socket(struct svc_serv *, int, 66static struct svc_xprt *svc_create_socket(struct svc_serv *, int,
67 struct net *, struct sockaddr *, 67 struct net *, struct sockaddr *,
68 int, int); 68 int, int);
69#if defined(CONFIG_NFS_V4_1)
70static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
71 struct net *, struct sockaddr *,
72 int, int);
73static void svc_bc_sock_free(struct svc_xprt *xprt);
74#endif /* CONFIG_NFS_V4_1 */
75
69#ifdef CONFIG_DEBUG_LOCK_ALLOC 76#ifdef CONFIG_DEBUG_LOCK_ALLOC
70static struct lock_class_key svc_key[2]; 77static struct lock_class_key svc_key[2];
71static struct lock_class_key svc_slock_key[2]; 78static struct lock_class_key svc_slock_key[2];
@@ -1184,6 +1191,57 @@ static struct svc_xprt *svc_tcp_create(struct svc_serv *serv,
1184 return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags); 1191 return svc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
1185} 1192}
1186 1193
1194#if defined(CONFIG_NFS_V4_1)
1195static struct svc_xprt *svc_bc_create_socket(struct svc_serv *, int,
1196 struct net *, struct sockaddr *,
1197 int, int);
1198static void svc_bc_sock_free(struct svc_xprt *xprt);
1199
1200static struct svc_xprt *svc_bc_tcp_create(struct svc_serv *serv,
1201 struct net *net,
1202 struct sockaddr *sa, int salen,
1203 int flags)
1204{
1205 return svc_bc_create_socket(serv, IPPROTO_TCP, net, sa, salen, flags);
1206}
1207
1208static void svc_bc_tcp_sock_detach(struct svc_xprt *xprt)
1209{
1210}
1211
1212static struct svc_xprt_ops svc_tcp_bc_ops = {
1213 .xpo_create = svc_bc_tcp_create,
1214 .xpo_detach = svc_bc_tcp_sock_detach,
1215 .xpo_free = svc_bc_sock_free,
1216 .xpo_prep_reply_hdr = svc_tcp_prep_reply_hdr,
1217};
1218
1219static struct svc_xprt_class svc_tcp_bc_class = {
1220 .xcl_name = "tcp-bc",
1221 .xcl_owner = THIS_MODULE,
1222 .xcl_ops = &svc_tcp_bc_ops,
1223 .xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
1224};
1225
1226static void svc_init_bc_xprt_sock(void)
1227{
1228 svc_reg_xprt_class(&svc_tcp_bc_class);
1229}
1230
1231static void svc_cleanup_bc_xprt_sock(void)
1232{
1233 svc_unreg_xprt_class(&svc_tcp_bc_class);
1234}
1235#else /* CONFIG_NFS_V4_1 */
1236static void svc_init_bc_xprt_sock(void)
1237{
1238}
1239
1240static void svc_cleanup_bc_xprt_sock(void)
1241{
1242}
1243#endif /* CONFIG_NFS_V4_1 */
1244
1187static struct svc_xprt_ops svc_tcp_ops = { 1245static struct svc_xprt_ops svc_tcp_ops = {
1188 .xpo_create = svc_tcp_create, 1246 .xpo_create = svc_tcp_create,
1189 .xpo_recvfrom = svc_tcp_recvfrom, 1247 .xpo_recvfrom = svc_tcp_recvfrom,
@@ -1207,12 +1265,14 @@ void svc_init_xprt_sock(void)
1207{ 1265{
1208 svc_reg_xprt_class(&svc_tcp_class); 1266 svc_reg_xprt_class(&svc_tcp_class);
1209 svc_reg_xprt_class(&svc_udp_class); 1267 svc_reg_xprt_class(&svc_udp_class);
1268 svc_init_bc_xprt_sock();
1210} 1269}
1211 1270
1212void svc_cleanup_xprt_sock(void) 1271void svc_cleanup_xprt_sock(void)
1213{ 1272{
1214 svc_unreg_xprt_class(&svc_tcp_class); 1273 svc_unreg_xprt_class(&svc_tcp_class);
1215 svc_unreg_xprt_class(&svc_udp_class); 1274 svc_unreg_xprt_class(&svc_udp_class);
1275 svc_cleanup_bc_xprt_sock();
1216} 1276}
1217 1277
1218static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) 1278static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
@@ -1509,41 +1569,45 @@ static void svc_sock_free(struct svc_xprt *xprt)
1509 kfree(svsk); 1569 kfree(svsk);
1510} 1570}
1511 1571
1572#if defined(CONFIG_NFS_V4_1)
1512/* 1573/*
1513 * Create a svc_xprt. 1574 * Create a back channel svc_xprt which shares the fore channel socket.
1514 *
1515 * For internal use only (e.g. nfsv4.1 backchannel).
1516 * Callers should typically use the xpo_create() method.
1517 */ 1575 */
1518struct svc_xprt *svc_sock_create(struct svc_serv *serv, int prot) 1576static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
1577 int protocol,
1578 struct net *net,
1579 struct sockaddr *sin, int len,
1580 int flags)
1519{ 1581{
1520 struct svc_sock *svsk; 1582 struct svc_sock *svsk;
1521 struct svc_xprt *xprt = NULL; 1583 struct svc_xprt *xprt;
1584
1585 if (protocol != IPPROTO_TCP) {
1586 printk(KERN_WARNING "svc: only TCP sockets"
1587 " supported on shared back channel\n");
1588 return ERR_PTR(-EINVAL);
1589 }
1522 1590
1523 dprintk("svc: %s\n", __func__);
1524 svsk = kzalloc(sizeof(*svsk), GFP_KERNEL); 1591 svsk = kzalloc(sizeof(*svsk), GFP_KERNEL);
1525 if (!svsk) 1592 if (!svsk)
1526 goto out; 1593 return ERR_PTR(-ENOMEM);
1527 1594
1528 xprt = &svsk->sk_xprt; 1595 xprt = &svsk->sk_xprt;
1529 if (prot == IPPROTO_TCP) 1596 svc_xprt_init(&svc_tcp_bc_class, xprt, serv);
1530 svc_xprt_init(&svc_tcp_class, xprt, serv); 1597
1531 else if (prot == IPPROTO_UDP) 1598 serv->sv_bc_xprt = xprt;
1532 svc_xprt_init(&svc_udp_class, xprt, serv); 1599
1533 else
1534 BUG();
1535out:
1536 dprintk("svc: %s return %p\n", __func__, xprt);
1537 return xprt; 1600 return xprt;
1538} 1601}
1539EXPORT_SYMBOL_GPL(svc_sock_create);
1540 1602
1541/* 1603/*
1542 * Destroy a svc_sock. 1604 * Free a back channel svc_sock.
1543 */ 1605 */
1544void svc_sock_destroy(struct svc_xprt *xprt) 1606static void svc_bc_sock_free(struct svc_xprt *xprt)
1545{ 1607{
1546 if (xprt) 1608 if (xprt) {
1609 kfree(xprt->xpt_bc_sid);
1547 kfree(container_of(xprt, struct svc_sock, sk_xprt)); 1610 kfree(container_of(xprt, struct svc_sock, sk_xprt));
1611 }
1548} 1612}
1549EXPORT_SYMBOL_GPL(svc_sock_destroy); 1613#endif /* CONFIG_NFS_V4_1 */
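
The "tcp-bc" class goes through the same svc_reg_xprt_class() registry
as "tcp" and "udp", so the backchannel socket is presumably created by
name through the generic API rather than by calling
svc_bc_create_socket() directly, replacing the exported
svc_sock_create()/svc_sock_destroy() pair. A hedged sketch of how
NFSv4.1 callback setup would reach it (the svc_create_xprt() argument
list is assumed from this kernel generation):

    /* SVC_SOCK_ANONYMOUS: no listener port is bound; the back
     * channel shares the forechannel's TCP connection. */
    ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
                          SVC_SOCK_ANONYMOUS);
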
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index cd9e841e7492..679cd674b81d 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -552,6 +552,74 @@ void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int b
552} 552}
553EXPORT_SYMBOL_GPL(xdr_write_pages); 553EXPORT_SYMBOL_GPL(xdr_write_pages);
554 554
555static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
556 __be32 *p, unsigned int len)
557{
558 if (len > iov->iov_len)
559 len = iov->iov_len;
560 if (p == NULL)
561 p = (__be32*)iov->iov_base;
562 xdr->p = p;
563 xdr->end = (__be32*)(iov->iov_base + len);
564 xdr->iov = iov;
565 xdr->page_ptr = NULL;
566}
567
568static int xdr_set_page_base(struct xdr_stream *xdr,
569 unsigned int base, unsigned int len)
570{
571 unsigned int pgnr;
572 unsigned int maxlen;
573 unsigned int pgoff;
574 unsigned int pgend;
575 void *kaddr;
576
577 maxlen = xdr->buf->page_len;
578 if (base >= maxlen)
579 return -EINVAL;
580 maxlen -= base;
581 if (len > maxlen)
582 len = maxlen;
583
584 base += xdr->buf->page_base;
585
586 pgnr = base >> PAGE_SHIFT;
587 xdr->page_ptr = &xdr->buf->pages[pgnr];
588 kaddr = page_address(*xdr->page_ptr);
589
590 pgoff = base & ~PAGE_MASK;
591 xdr->p = (__be32*)(kaddr + pgoff);
592
593 pgend = pgoff + len;
594 if (pgend > PAGE_SIZE)
595 pgend = PAGE_SIZE;
596 xdr->end = (__be32*)(kaddr + pgend);
597 xdr->iov = NULL;
598 return 0;
599}
600
601static void xdr_set_next_page(struct xdr_stream *xdr)
602{
603 unsigned int newbase;
604
605 newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
606 newbase -= xdr->buf->page_base;
607
608 if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
609 xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
610}
611
612static bool xdr_set_next_buffer(struct xdr_stream *xdr)
613{
614 if (xdr->page_ptr != NULL)
615 xdr_set_next_page(xdr);
616 else if (xdr->iov == xdr->buf->head) {
617 if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
618 xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
619 }
620 return xdr->p != xdr->end;
621}
622
555/** 623/**
556 * xdr_init_decode - Initialize an xdr_stream for decoding data. 624 * xdr_init_decode - Initialize an xdr_stream for decoding data.
557 * @xdr: pointer to xdr_stream struct 625 * @xdr: pointer to xdr_stream struct
@@ -560,41 +628,67 @@ EXPORT_SYMBOL_GPL(xdr_write_pages);
560 */ 628 */
561void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p) 629void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
562{ 630{
563 struct kvec *iov = buf->head;
564 unsigned int len = iov->iov_len;
565
566 if (len > buf->len)
567 len = buf->len;
568 xdr->buf = buf; 631 xdr->buf = buf;
569 xdr->iov = iov; 632 xdr->scratch.iov_base = NULL;
570 xdr->p = p; 633 xdr->scratch.iov_len = 0;
571 xdr->end = (__be32 *)((char *)iov->iov_base + len); 634 if (buf->head[0].iov_len != 0)
635 xdr_set_iov(xdr, buf->head, p, buf->len);
636 else if (buf->page_len != 0)
637 xdr_set_page_base(xdr, 0, buf->len);
572} 638}
573EXPORT_SYMBOL_GPL(xdr_init_decode); 639EXPORT_SYMBOL_GPL(xdr_init_decode);
574 640
575/** 641static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
576 * xdr_inline_peek - Allow read-ahead in the XDR data stream
577 * @xdr: pointer to xdr_stream struct
578 * @nbytes: number of bytes of data to decode
579 *
580 * Check if the input buffer is long enough to enable us to decode
581 * 'nbytes' more bytes of data starting at the current position.
582 * If so return the current pointer without updating the current
583 * pointer position.
584 */
585__be32 * xdr_inline_peek(struct xdr_stream *xdr, size_t nbytes)
586{ 642{
587 __be32 *p = xdr->p; 643 __be32 *p = xdr->p;
588 __be32 *q = p + XDR_QUADLEN(nbytes); 644 __be32 *q = p + XDR_QUADLEN(nbytes);
589 645
590 if (unlikely(q > xdr->end || q < p)) 646 if (unlikely(q > xdr->end || q < p))
591 return NULL; 647 return NULL;
648 xdr->p = q;
592 return p; 649 return p;
593} 650}
594EXPORT_SYMBOL_GPL(xdr_inline_peek);
595 651
596/** 652/**
597 * xdr_inline_decode - Retrieve non-page XDR data to decode 653 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
654 * @xdr: pointer to xdr_stream struct
655 * @buf: pointer to an empty buffer
656 * @buflen: size of 'buf'
657 *
658 * The scratch buffer is used when decoding from an array of pages.
659 * If an xdr_inline_decode() call spans across page boundaries, then
660 * we copy the data into the scratch buffer in order to allow linear
661 * access.
662 */
663void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
664{
665 xdr->scratch.iov_base = buf;
666 xdr->scratch.iov_len = buflen;
667}
668EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
669
670static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
671{
672 __be32 *p;
673 void *cpdest = xdr->scratch.iov_base;
674 size_t cplen = (char *)xdr->end - (char *)xdr->p;
675
676 if (nbytes > xdr->scratch.iov_len)
677 return NULL;
678 memcpy(cpdest, xdr->p, cplen);
679 cpdest += cplen;
680 nbytes -= cplen;
681 if (!xdr_set_next_buffer(xdr))
682 return NULL;
683 p = __xdr_inline_decode(xdr, nbytes);
684 if (p == NULL)
685 return NULL;
686 memcpy(cpdest, p, nbytes);
687 return xdr->scratch.iov_base;
688}
689
690/**
691 * xdr_inline_decode - Retrieve XDR data to decode
598 * @xdr: pointer to xdr_stream struct 692 * @xdr: pointer to xdr_stream struct
599 * @nbytes: number of bytes of data to decode 693 * @nbytes: number of bytes of data to decode
600 * 694 *
@@ -605,13 +699,16 @@ EXPORT_SYMBOL_GPL(xdr_inline_peek);
605 */ 699 */
606__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes) 700__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
607{ 701{
608 __be32 *p = xdr->p; 702 __be32 *p;
609 __be32 *q = p + XDR_QUADLEN(nbytes);
610 703
611 if (unlikely(q > xdr->end || q < p)) 704 if (nbytes == 0)
705 return xdr->p;
706 if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
612 return NULL; 707 return NULL;
613 xdr->p = q; 708 p = __xdr_inline_decode(xdr, nbytes);
614 return p; 709 if (p != NULL)
710 return p;
711 return xdr_copy_to_scratch(xdr, nbytes);
615} 712}
616EXPORT_SYMBOL_GPL(xdr_inline_decode); 713EXPORT_SYMBOL_GPL(xdr_inline_decode);
617 714
@@ -671,16 +768,12 @@ EXPORT_SYMBOL_GPL(xdr_read_pages);
671 */ 768 */
672void xdr_enter_page(struct xdr_stream *xdr, unsigned int len) 769void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
673{ 770{
674 char * kaddr = page_address(xdr->buf->pages[0]);
675 xdr_read_pages(xdr, len); 771 xdr_read_pages(xdr, len);
676 /* 772 /*
677 * Position current pointer at beginning of tail, and 773 * Position current pointer at beginning of tail, and
678 * set remaining message length. 774 * set remaining message length.
679 */ 775 */
680 if (len > PAGE_CACHE_SIZE - xdr->buf->page_base) 776 xdr_set_page_base(xdr, 0, len);
681 len = PAGE_CACHE_SIZE - xdr->buf->page_base;
682 xdr->p = (__be32 *)(kaddr + xdr->buf->page_base);
683 xdr->end = (__be32 *)((char *)xdr->p + len);
684} 777}
685EXPORT_SYMBOL_GPL(xdr_enter_page); 778EXPORT_SYMBOL_GPL(xdr_enter_page);
686 779
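
With these changes xdr_inline_decode() can walk out of the head kvec
and across the page array; when a requested item straddles a page
boundary, __xdr_inline_decode() fails on the current page and
xdr_copy_to_scratch() reassembles the item in a caller-supplied scratch
buffer so the caller still sees it linearly. A usage sketch, with the
surrounding context (req, data, the 64-byte item) assumed:

    struct xdr_stream xdr;
    char scratch[64];
    __be32 *p;

    xdr_init_decode(&xdr, &req->rq_rcv_buf, data);
    xdr_set_scratch_buffer(&xdr, scratch, sizeof(scratch));

    p = xdr_inline_decode(&xdr, 64);
    if (p == NULL)
            return -EIO;  /* ran off the end of the reply */
    /* p points into the receive buffer, or into scratch[] if the
     * 64 bytes crossed a page boundary */

The scratch buffer must be at least as large as the biggest single
xdr_inline_decode() request the decoder will make.
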
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 8eb889510916..d5e1e0b08890 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -26,6 +26,7 @@
26#include <net/sock.h> 26#include <net/sock.h>
27#include <net/xfrm.h> 27#include <net/xfrm.h>
28#include <net/netlink.h> 28#include <net/netlink.h>
29#include <net/ah.h>
29#include <asm/uaccess.h> 30#include <asm/uaccess.h>
30#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 31#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
31#include <linux/in6.h> 32#include <linux/in6.h>
@@ -302,7 +303,8 @@ static int attach_auth_trunc(struct xfrm_algo_auth **algpp, u8 *props,
302 algo = xfrm_aalg_get_byname(ualg->alg_name, 1); 303 algo = xfrm_aalg_get_byname(ualg->alg_name, 1);
303 if (!algo) 304 if (!algo)
304 return -ENOSYS; 305 return -ENOSYS;
305 if (ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) 306 if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN ||
307 ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits)
306 return -EINVAL; 308 return -EINVAL;
307 *props = algo->desc.sadb_alg_id; 309 *props = algo->desc.sadb_alg_id;
308 310
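
alg_trunc_len arrives from userspace in bits, and the old check only
bounded it against the algorithm's full ICV width; for HMACs whose full
ICV is wider than MAX_AH_AUTH_LEN bytes, a valid-looking truncation
length could still overrun the fixed-size ICV buffers in the AH code,
which is why <net/ah.h> is now included here. Both bounds, annotated:

    if ((ualg->alg_trunc_len / 8) > MAX_AH_AUTH_LEN || /* fits AH bufs */
        ualg->alg_trunc_len > algo->uinfo.auth.icv_fullbits) /* alg max */
            return -EINVAL;
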
@@ -2187,7 +2189,7 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
2187 2189
2188 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) || 2190 if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
2189 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) && 2191 type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
2190 (nlh->nlmsg_flags & NLM_F_DUMP)) { 2192 (nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP) {
2191 if (link->dump == NULL) 2193 if (link->dump == NULL)
2192 return -EINVAL; 2194 return -EINVAL;
2193 2195
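
NLM_F_DUMP is a compound flag (NLM_F_ROOT | NLM_F_MATCH), so the old
truthiness test also matched messages carrying only one of the two bits
and could route a plain GETSA/GETPOLICY down the dump path. Comparing
the masked value against the full constant requires both bits to be
present:

    /* NLM_F_ROOT == 0x100, NLM_F_MATCH == 0x200,
     * NLM_F_DUMP == NLM_F_ROOT | NLM_F_MATCH */
    if ((nlh->nlmsg_flags & NLM_F_DUMP) == NLM_F_DUMP)
            /* genuine dump request */;
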