Diffstat (limited to 'net')
-rw-r--r--  net/bluetooth/af_bluetooth.c |   3
-rw-r--r--  net/bridge/br_stp_bpdu.c |   5
-rw-r--r--  net/core/datagram.c |   2
-rw-r--r--  net/core/dev.c |   7
-rw-r--r--  net/core/skbuff.c |   2
-rw-r--r--  net/core/sock.c |   5
-rw-r--r--  net/dccp/proto.c |   2
-rw-r--r--  net/ipv4/icmp.c |   2
-rw-r--r--  net/ipv4/inet_hashtables.c |   2
-rw-r--r--  net/ipv4/netfilter/ip_nat_standalone.c |   2
-rw-r--r--  net/ipv4/route.c |  45
-rw-r--r--  net/ipv4/tcp.c |  28
-rw-r--r--  net/netfilter/x_tables.c |  56
-rw-r--r--  net/rxrpc/main.c |   2
-rw-r--r--  net/sctp/input.c |  12
-rw-r--r--  net/sctp/socket.c |   2
-rw-r--r--  net/socket.c |   3
-rw-r--r--  net/sunrpc/auth.c |  16
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c |   2
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seal.c |  15
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_unseal.c |   4
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_wrap.c |  17
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_mech.c |   6
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_seal.c |   5
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_unseal.c |   4
-rw-r--r--  net/sunrpc/clnt.c |  53
-rw-r--r--  net/sunrpc/pmap_clnt.c |  41
-rw-r--r--  net/sunrpc/rpc_pipe.c |  38
-rw-r--r--  net/sunrpc/sched.c |  12
-rw-r--r--  net/sunrpc/stats.c | 115
-rw-r--r--  net/sunrpc/xprt.c |  29
-rw-r--r--  net/sunrpc/xprtsock.c |  49
-rw-r--r--  net/tipc/link.c |   2
-rw-r--r--  net/unix/af_unix.c |   2
34 files changed, 432 insertions(+), 158 deletions(-)
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index fb031fe9be9e..469eda0f0dfd 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -238,6 +238,9 @@ unsigned int bt_sock_poll(struct file * file, struct socket *sock, poll_table *w
238 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 238 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
239 mask |= POLLERR; 239 mask |= POLLERR;
240 240
241 if (sk->sk_shutdown & RCV_SHUTDOWN)
242 mask |= POLLRDHUP;
243
241 if (sk->sk_shutdown == SHUTDOWN_MASK) 244 if (sk->sk_shutdown == SHUTDOWN_MASK)
242 mask |= POLLHUP; 245 mask |= POLLHUP;
243 246
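
This hunk, like the matching poll changes in net/core/datagram.c, net/dccp/proto.c, net/ipv4/tcp.c and net/sctp/socket.c below, makes poll() report POLLRDHUP once the receive side has been shut down. A minimal userspace sketch of how an application could consume the new event; the helper name and the indefinite timeout are illustrative, not part of the patch.

#define _GNU_SOURCE             /* POLLRDHUP is a GNU/Linux extension */
#include <poll.h>
#include <stdio.h>

/* Wait on a connected stream socket until the remote end half-closes
 * its write side (POLLRDHUP) or the connection is fully shut down. */
int wait_for_peer_shutdown(int fd)
{
        struct pollfd pfd = { .fd = fd, .events = POLLIN | POLLRDHUP };

        if (poll(&pfd, 1, -1) < 0)
                return -1;
        if (pfd.revents & POLLRDHUP)
                printf("peer shut down its write side\n");
        if (pfd.revents & POLLHUP)
                printf("connection shut down in both directions\n");
        return 0;
}
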
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8934a54792be..a7ba0cce0b46 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -19,6 +19,7 @@
19#include <linux/llc.h> 19#include <linux/llc.h>
20#include <net/llc.h> 20#include <net/llc.h>
21#include <net/llc_pdu.h> 21#include <net/llc_pdu.h>
22#include <asm/unaligned.h>
22 23
23#include "br_private.h" 24#include "br_private.h"
24#include "br_private_stp.h" 25#include "br_private_stp.h"
@@ -59,12 +60,12 @@ static inline void br_set_ticks(unsigned char *dest, int j)
59{ 60{
60 unsigned long ticks = (STP_HZ * j)/ HZ; 61 unsigned long ticks = (STP_HZ * j)/ HZ;
61 62
62 *((__be16 *) dest) = htons(ticks); 63 put_unaligned(htons(ticks), (__be16 *)dest);
63} 64}
64 65
65static inline int br_get_ticks(const unsigned char *src) 66static inline int br_get_ticks(const unsigned char *src)
66{ 67{
67 unsigned long ticks = ntohs(*(__be16 *)src); 68 unsigned long ticks = ntohs(get_unaligned((__be16 *)src));
68 69
69 return (ticks * HZ + STP_HZ - 1) / STP_HZ; 70 return (ticks * HZ + STP_HZ - 1) / STP_HZ;
70} 71}
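
The 16-bit timer fields in an STP BPDU are not guaranteed to be naturally aligned in the received frame, so the direct loads and stores above are replaced with the asm/unaligned.h helpers. For reference, a portable userspace sketch of the same idea, using memcpy() so the compiler emits alignment-safe accesses; the helper names below are invented.

#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>          /* htons(), ntohs() */

/* Store a 16-bit big-endian value at a possibly unaligned address. */
void put_be16(unsigned char *dest, uint16_t host_val)
{
        uint16_t be = htons(host_val);

        memcpy(dest, &be, sizeof(be));  /* analogous to put_unaligned(htons(v), dest) */
}

/* Load a 16-bit big-endian value from a possibly unaligned address. */
uint16_t get_be16(const unsigned char *src)
{
        uint16_t be;

        memcpy(&be, src, sizeof(be));   /* analogous to get_unaligned((__be16 *)src) */
        return ntohs(be);
}
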
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b8ce6bf81188..aecddcc30401 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -500,6 +500,8 @@ unsigned int datagram_poll(struct file *file, struct socket *sock,
500 /* exceptional events? */ 500 /* exceptional events? */
501 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 501 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
502 mask |= POLLERR; 502 mask |= POLLERR;
503 if (sk->sk_shutdown & RCV_SHUTDOWN)
504 mask |= POLLRDHUP;
503 if (sk->sk_shutdown == SHUTDOWN_MASK) 505 if (sk->sk_shutdown == SHUTDOWN_MASK)
504 mask |= POLLHUP; 506 mask |= POLLHUP;
505 507
diff --git a/net/core/dev.c b/net/core/dev.c
index 08dec6eb922b..e0489ca731c5 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -977,7 +977,12 @@ int register_netdevice_notifier(struct notifier_block *nb)
977 977
978int unregister_netdevice_notifier(struct notifier_block *nb) 978int unregister_netdevice_notifier(struct notifier_block *nb)
979{ 979{
980 return notifier_chain_unregister(&netdev_chain, nb); 980 int err;
981
982 rtnl_lock();
983 err = notifier_chain_unregister(&netdev_chain, nb);
984 rtnl_unlock();
985 return err;
981} 986}
982 987
983/** 988/**
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c9f878454531..09464fa8d72f 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -149,7 +149,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
149 149
150 /* Get the DATA. Size must match skb_add_mtu(). */ 150 /* Get the DATA. Size must match skb_add_mtu(). */
151 size = SKB_DATA_ALIGN(size); 151 size = SKB_DATA_ALIGN(size);
152 data = kmalloc(size + sizeof(struct skb_shared_info), gfp_mask); 152 data = ____kmalloc(size + sizeof(struct skb_shared_info), gfp_mask);
153 if (!data) 153 if (!data)
154 goto nodata; 154 goto nodata;
155 155
diff --git a/net/core/sock.c b/net/core/sock.c
index 1a7e6eac90b0..e110b9004147 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -404,8 +404,9 @@ set_rcvbuf:
404 if (!valbool) { 404 if (!valbool) {
405 sk->sk_bound_dev_if = 0; 405 sk->sk_bound_dev_if = 0;
406 } else { 406 } else {
407 if (optlen > IFNAMSIZ) 407 if (optlen > IFNAMSIZ - 1)
408 optlen = IFNAMSIZ; 408 optlen = IFNAMSIZ - 1;
409 memset(devname, 0, sizeof(devname));
409 if (copy_from_user(devname, optval, optlen)) { 410 if (copy_from_user(devname, optval, optlen)) {
410 ret = -EFAULT; 411 ret = -EFAULT;
411 break; 412 break;
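
The hunk above caps the copy at IFNAMSIZ - 1 bytes and zeroes the buffer first, so the interface name used by SO_BINDTODEVICE is always NUL-terminated. From userspace the option is set roughly as follows; bind_to_device is a hypothetical helper, and setting the option normally requires CAP_NET_RAW.

#include <string.h>
#include <sys/socket.h>
#include <net/if.h>             /* IFNAMSIZ */

/* Bind a socket to one network interface, e.g. bind_to_device(fd, "eth0"). */
int bind_to_device(int fd, const char *ifname)
{
        char name[IFNAMSIZ];

        strncpy(name, ifname, IFNAMSIZ - 1);
        name[IFNAMSIZ - 1] = '\0';
        return setsockopt(fd, SOL_SOCKET, SO_BINDTODEVICE, name, strlen(name) + 1);
}
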
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index d4b293e16283..1ff7328b0e17 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -350,7 +350,7 @@ unsigned int dccp_poll(struct file *file, struct socket *sock,
350 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED) 350 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == DCCP_CLOSED)
351 mask |= POLLHUP; 351 mask |= POLLHUP;
352 if (sk->sk_shutdown & RCV_SHUTDOWN) 352 if (sk->sk_shutdown & RCV_SHUTDOWN)
353 mask |= POLLIN | POLLRDNORM; 353 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
354 354
355 /* Connected? */ 355 /* Connected? */
356 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) { 356 if ((1 << sk->sk_state) & ~(DCCPF_REQUESTING | DCCPF_RESPOND)) {
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index e7bbff4340bb..9831fd2c73a0 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -753,7 +753,7 @@ static void icmp_redirect(struct sk_buff *skb)
753 case ICMP_REDIR_HOST: 753 case ICMP_REDIR_HOST:
754 case ICMP_REDIR_HOSTTOS: 754 case ICMP_REDIR_HOSTTOS:
755 ip_rt_redirect(skb->nh.iph->saddr, ip, skb->h.icmph->un.gateway, 755 ip_rt_redirect(skb->nh.iph->saddr, ip, skb->h.icmph->un.gateway,
756 iph->saddr, iph->tos, skb->dev); 756 iph->saddr, skb->dev);
757 break; 757 break;
758 } 758 }
759out: 759out:
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 33228115cda4..ef7366fc132f 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -315,7 +315,7 @@ ok:
315 spin_unlock(&head->lock); 315 spin_unlock(&head->lock);
316 316
317 if (tw) { 317 if (tw) {
318 inet_twsk_deschedule(tw, death_row);; 318 inet_twsk_deschedule(tw, death_row);
319 inet_twsk_put(tw); 319 inet_twsk_put(tw);
320 } 320 }
321 321
diff --git a/net/ipv4/netfilter/ip_nat_standalone.c b/net/ipv4/netfilter/ip_nat_standalone.c
index ab1f88fa21ec..380aef3d7865 100644
--- a/net/ipv4/netfilter/ip_nat_standalone.c
+++ b/net/ipv4/netfilter/ip_nat_standalone.c
@@ -394,7 +394,7 @@ static int init_or_cleanup(int init)
394 ret = nf_register_hook(&ip_nat_local_out_ops); 394 ret = nf_register_hook(&ip_nat_local_out_ops);
395 if (ret < 0) { 395 if (ret < 0) {
396 printk("ip_nat_init: can't register local out hook.\n"); 396 printk("ip_nat_init: can't register local out hook.\n");
397 goto cleanup_adjustout_ops;; 397 goto cleanup_adjustout_ops;
398 } 398 }
399 ret = nf_register_hook(&ip_nat_local_in_ops); 399 ret = nf_register_hook(&ip_nat_local_in_ops);
400 if (ret < 0) { 400 if (ret < 0) {
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index fca5fe0cf94a..94fcbc5e5a1b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -55,6 +55,8 @@
55 * Robert Olsson : Added rt_cache statistics 55 * Robert Olsson : Added rt_cache statistics
56 * Arnaldo C. Melo : Convert proc stuff to seq_file 56 * Arnaldo C. Melo : Convert proc stuff to seq_file
57 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes. 57 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
58 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
59 * Ilia Sotnikov : Removed TOS from hash calculations
58 * 60 *
59 * This program is free software; you can redistribute it and/or 61 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License 62 * modify it under the terms of the GNU General Public License
@@ -247,9 +249,9 @@ static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
247static int rt_intern_hash(unsigned hash, struct rtable *rth, 249static int rt_intern_hash(unsigned hash, struct rtable *rth,
248 struct rtable **res); 250 struct rtable **res);
249 251
250static unsigned int rt_hash_code(u32 daddr, u32 saddr, u8 tos) 252static unsigned int rt_hash_code(u32 daddr, u32 saddr)
251{ 253{
252 return (jhash_3words(daddr, saddr, (u32) tos, rt_hash_rnd) 254 return (jhash_2words(daddr, saddr, rt_hash_rnd)
253 & rt_hash_mask); 255 & rt_hash_mask);
254} 256}
255 257
@@ -1111,7 +1113,7 @@ static void rt_del(unsigned hash, struct rtable *rt)
1111} 1113}
1112 1114
1113void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw, 1115void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
1114 u32 saddr, u8 tos, struct net_device *dev) 1116 u32 saddr, struct net_device *dev)
1115{ 1117{
1116 int i, k; 1118 int i, k;
1117 struct in_device *in_dev = in_dev_get(dev); 1119 struct in_device *in_dev = in_dev_get(dev);
@@ -1119,8 +1121,6 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
1119 u32 skeys[2] = { saddr, 0 }; 1121 u32 skeys[2] = { saddr, 0 };
1120 int ikeys[2] = { dev->ifindex, 0 }; 1122 int ikeys[2] = { dev->ifindex, 0 };
1121 1123
1122 tos &= IPTOS_RT_MASK;
1123
1124 if (!in_dev) 1124 if (!in_dev)
1125 return; 1125 return;
1126 1126
@@ -1141,8 +1141,7 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
1141 for (i = 0; i < 2; i++) { 1141 for (i = 0; i < 2; i++) {
1142 for (k = 0; k < 2; k++) { 1142 for (k = 0; k < 2; k++) {
1143 unsigned hash = rt_hash_code(daddr, 1143 unsigned hash = rt_hash_code(daddr,
1144 skeys[i] ^ (ikeys[k] << 5), 1144 skeys[i] ^ (ikeys[k] << 5));
1145 tos);
1146 1145
1147 rthp=&rt_hash_table[hash].chain; 1146 rthp=&rt_hash_table[hash].chain;
1148 1147
@@ -1152,7 +1151,6 @@ void ip_rt_redirect(u32 old_gw, u32 daddr, u32 new_gw,
1152 1151
1153 if (rth->fl.fl4_dst != daddr || 1152 if (rth->fl.fl4_dst != daddr ||
1154 rth->fl.fl4_src != skeys[i] || 1153 rth->fl.fl4_src != skeys[i] ||
1155 rth->fl.fl4_tos != tos ||
1156 rth->fl.oif != ikeys[k] || 1154 rth->fl.oif != ikeys[k] ||
1157 rth->fl.iif != 0) { 1155 rth->fl.iif != 0) {
1158 rthp = &rth->u.rt_next; 1156 rthp = &rth->u.rt_next;
@@ -1232,10 +1230,9 @@ reject_redirect:
1232 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) 1230 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit())
1233 printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about " 1231 printk(KERN_INFO "Redirect from %u.%u.%u.%u on %s about "
1234 "%u.%u.%u.%u ignored.\n" 1232 "%u.%u.%u.%u ignored.\n"
1235 " Advised path = %u.%u.%u.%u -> %u.%u.%u.%u, " 1233 " Advised path = %u.%u.%u.%u -> %u.%u.%u.%u\n",
1236 "tos %02x\n",
1237 NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw), 1234 NIPQUAD(old_gw), dev->name, NIPQUAD(new_gw),
1238 NIPQUAD(saddr), NIPQUAD(daddr), tos); 1235 NIPQUAD(saddr), NIPQUAD(daddr));
1239#endif 1236#endif
1240 in_dev_put(in_dev); 1237 in_dev_put(in_dev);
1241} 1238}
@@ -1253,8 +1250,7 @@ static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
1253 rt->u.dst.expires) { 1250 rt->u.dst.expires) {
1254 unsigned hash = rt_hash_code(rt->fl.fl4_dst, 1251 unsigned hash = rt_hash_code(rt->fl.fl4_dst,
1255 rt->fl.fl4_src ^ 1252 rt->fl.fl4_src ^
1256 (rt->fl.oif << 5), 1253 (rt->fl.oif << 5));
1257 rt->fl.fl4_tos);
1258#if RT_CACHE_DEBUG >= 1 1254#if RT_CACHE_DEBUG >= 1
1259 printk(KERN_DEBUG "ip_rt_advice: redirect to " 1255 printk(KERN_DEBUG "ip_rt_advice: redirect to "
1260 "%u.%u.%u.%u/%02x dropped\n", 1256 "%u.%u.%u.%u/%02x dropped\n",
@@ -1391,14 +1387,13 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
1391 struct rtable *rth; 1387 struct rtable *rth;
1392 u32 skeys[2] = { iph->saddr, 0, }; 1388 u32 skeys[2] = { iph->saddr, 0, };
1393 u32 daddr = iph->daddr; 1389 u32 daddr = iph->daddr;
1394 u8 tos = iph->tos & IPTOS_RT_MASK;
1395 unsigned short est_mtu = 0; 1390 unsigned short est_mtu = 0;
1396 1391
1397 if (ipv4_config.no_pmtu_disc) 1392 if (ipv4_config.no_pmtu_disc)
1398 return 0; 1393 return 0;
1399 1394
1400 for (i = 0; i < 2; i++) { 1395 for (i = 0; i < 2; i++) {
1401 unsigned hash = rt_hash_code(daddr, skeys[i], tos); 1396 unsigned hash = rt_hash_code(daddr, skeys[i]);
1402 1397
1403 rcu_read_lock(); 1398 rcu_read_lock();
1404 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 1399 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -1407,7 +1402,6 @@ unsigned short ip_rt_frag_needed(struct iphdr *iph, unsigned short new_mtu)
1407 rth->fl.fl4_src == skeys[i] && 1402 rth->fl.fl4_src == skeys[i] &&
1408 rth->rt_dst == daddr && 1403 rth->rt_dst == daddr &&
1409 rth->rt_src == iph->saddr && 1404 rth->rt_src == iph->saddr &&
1410 rth->fl.fl4_tos == tos &&
1411 rth->fl.iif == 0 && 1405 rth->fl.iif == 0 &&
1412 !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) { 1406 !(dst_metric_locked(&rth->u.dst, RTAX_MTU))) {
1413 unsigned short mtu = new_mtu; 1407 unsigned short mtu = new_mtu;
@@ -1658,7 +1652,7 @@ static int ip_route_input_mc(struct sk_buff *skb, u32 daddr, u32 saddr,
1658 RT_CACHE_STAT_INC(in_slow_mc); 1652 RT_CACHE_STAT_INC(in_slow_mc);
1659 1653
1660 in_dev_put(in_dev); 1654 in_dev_put(in_dev);
1661 hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5), tos); 1655 hash = rt_hash_code(daddr, saddr ^ (dev->ifindex << 5));
1662 return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst); 1656 return rt_intern_hash(hash, rth, (struct rtable**) &skb->dst);
1663 1657
1664e_nobufs: 1658e_nobufs:
@@ -1823,7 +1817,7 @@ static inline int ip_mkroute_input_def(struct sk_buff *skb,
1823 return err; 1817 return err;
1824 1818
1825 /* put it into the cache */ 1819 /* put it into the cache */
1826 hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5), tos); 1820 hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
1827 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); 1821 return rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
1828} 1822}
1829 1823
@@ -1864,7 +1858,7 @@ static inline int ip_mkroute_input(struct sk_buff *skb,
1864 return err; 1858 return err;
1865 1859
1866 /* put it into the cache */ 1860 /* put it into the cache */
1867 hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5), tos); 1861 hash = rt_hash_code(daddr, saddr ^ (fl->iif << 5));
1868 err = rt_intern_hash(hash, rth, &rtres); 1862 err = rt_intern_hash(hash, rth, &rtres);
1869 if (err) 1863 if (err)
1870 return err; 1864 return err;
@@ -2041,7 +2035,7 @@ local_input:
2041 rth->rt_flags &= ~RTCF_LOCAL; 2035 rth->rt_flags &= ~RTCF_LOCAL;
2042 } 2036 }
2043 rth->rt_type = res.type; 2037 rth->rt_type = res.type;
2044 hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5), tos); 2038 hash = rt_hash_code(daddr, saddr ^ (fl.iif << 5));
2045 err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst); 2039 err = rt_intern_hash(hash, rth, (struct rtable**)&skb->dst);
2046 goto done; 2040 goto done;
2047 2041
@@ -2088,7 +2082,7 @@ int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
2088 int iif = dev->ifindex; 2082 int iif = dev->ifindex;
2089 2083
2090 tos &= IPTOS_RT_MASK; 2084 tos &= IPTOS_RT_MASK;
2091 hash = rt_hash_code(daddr, saddr ^ (iif << 5), tos); 2085 hash = rt_hash_code(daddr, saddr ^ (iif << 5));
2092 2086
2093 rcu_read_lock(); 2087 rcu_read_lock();
2094 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2088 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
@@ -2286,10 +2280,8 @@ static inline int ip_mkroute_output_def(struct rtable **rp,
2286 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags); 2280 int err = __mkroute_output(&rth, res, fl, oldflp, dev_out, flags);
2287 unsigned hash; 2281 unsigned hash;
2288 if (err == 0) { 2282 if (err == 0) {
2289 u32 tos = RT_FL_TOS(oldflp);
2290
2291 hash = rt_hash_code(oldflp->fl4_dst, 2283 hash = rt_hash_code(oldflp->fl4_dst,
2292 oldflp->fl4_src ^ (oldflp->oif << 5), tos); 2284 oldflp->fl4_src ^ (oldflp->oif << 5));
2293 err = rt_intern_hash(hash, rth, rp); 2285 err = rt_intern_hash(hash, rth, rp);
2294 } 2286 }
2295 2287
@@ -2304,7 +2296,6 @@ static inline int ip_mkroute_output(struct rtable** rp,
2304 unsigned flags) 2296 unsigned flags)
2305{ 2297{
2306#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED 2298#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
2307 u32 tos = RT_FL_TOS(oldflp);
2308 unsigned char hop; 2299 unsigned char hop;
2309 unsigned hash; 2300 unsigned hash;
2310 int err = -EINVAL; 2301 int err = -EINVAL;
@@ -2334,7 +2325,7 @@ static inline int ip_mkroute_output(struct rtable** rp,
2334 2325
2335 hash = rt_hash_code(oldflp->fl4_dst, 2326 hash = rt_hash_code(oldflp->fl4_dst,
2336 oldflp->fl4_src ^ 2327 oldflp->fl4_src ^
2337 (oldflp->oif << 5), tos); 2328 (oldflp->oif << 5));
2338 err = rt_intern_hash(hash, rth, rp); 2329 err = rt_intern_hash(hash, rth, rp);
2339 2330
2340 /* forward hop information to multipath impl. */ 2331 /* forward hop information to multipath impl. */
@@ -2563,7 +2554,7 @@ int __ip_route_output_key(struct rtable **rp, const struct flowi *flp)
2563 unsigned hash; 2554 unsigned hash;
2564 struct rtable *rth; 2555 struct rtable *rth;
2565 2556
2566 hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5), flp->fl4_tos); 2557 hash = rt_hash_code(flp->fl4_dst, flp->fl4_src ^ (flp->oif << 5));
2567 2558
2568 rcu_read_lock_bh(); 2559 rcu_read_lock_bh();
2569 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth; 2560 for (rth = rcu_dereference(rt_hash_table[hash].chain); rth;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4b0272c92d66..87f68e787d0c 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -257,6 +257,7 @@
257#include <linux/fs.h> 257#include <linux/fs.h>
258#include <linux/random.h> 258#include <linux/random.h>
259#include <linux/bootmem.h> 259#include <linux/bootmem.h>
260#include <linux/cache.h>
260 261
261#include <net/icmp.h> 262#include <net/icmp.h>
262#include <net/tcp.h> 263#include <net/tcp.h>
@@ -275,9 +276,9 @@ atomic_t tcp_orphan_count = ATOMIC_INIT(0);
275 276
276EXPORT_SYMBOL_GPL(tcp_orphan_count); 277EXPORT_SYMBOL_GPL(tcp_orphan_count);
277 278
278int sysctl_tcp_mem[3]; 279int sysctl_tcp_mem[3] __read_mostly;
279int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 }; 280int sysctl_tcp_wmem[3] __read_mostly;
280int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 }; 281int sysctl_tcp_rmem[3] __read_mostly;
281 282
282EXPORT_SYMBOL(sysctl_tcp_mem); 283EXPORT_SYMBOL(sysctl_tcp_mem);
283EXPORT_SYMBOL(sysctl_tcp_rmem); 284EXPORT_SYMBOL(sysctl_tcp_rmem);
@@ -365,7 +366,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
365 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE) 366 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
366 mask |= POLLHUP; 367 mask |= POLLHUP;
367 if (sk->sk_shutdown & RCV_SHUTDOWN) 368 if (sk->sk_shutdown & RCV_SHUTDOWN)
368 mask |= POLLIN | POLLRDNORM; 369 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
369 370
370 /* Connected? */ 371 /* Connected? */
371 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) { 372 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
@@ -2081,7 +2082,8 @@ __setup("thash_entries=", set_thash_entries);
2081void __init tcp_init(void) 2082void __init tcp_init(void)
2082{ 2083{
2083 struct sk_buff *skb = NULL; 2084 struct sk_buff *skb = NULL;
2084 int order, i; 2085 unsigned long limit;
2086 int order, i, max_share;
2085 2087
2086 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb)) 2088 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2087 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb), 2089 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
@@ -2155,12 +2157,16 @@ void __init tcp_init(void)
2155 sysctl_tcp_mem[1] = 1024 << order; 2157 sysctl_tcp_mem[1] = 1024 << order;
2156 sysctl_tcp_mem[2] = 1536 << order; 2158 sysctl_tcp_mem[2] = 1536 << order;
2157 2159
2158 if (order < 3) { 2160 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2159 sysctl_tcp_wmem[2] = 64 * 1024; 2161 max_share = min(4UL*1024*1024, limit);
2160 sysctl_tcp_rmem[0] = PAGE_SIZE; 2162
2161 sysctl_tcp_rmem[1] = 43689; 2163 sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
2162 sysctl_tcp_rmem[2] = 2 * 43689; 2164 sysctl_tcp_wmem[1] = 16*1024;
2163 } 2165 sysctl_tcp_wmem[2] = max(64*1024, max_share);
2166
2167 sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
2168 sysctl_tcp_rmem[1] = 87380;
2169 sysctl_tcp_rmem[2] = max(87380, max_share);
2164 2170
2165 printk(KERN_INFO "TCP: Hash tables configured " 2171 printk(KERN_INFO "TCP: Hash tables configured "
2166 "(established %d bind %d)\n", 2172 "(established %d bind %d)\n",
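
The old order-based heuristics for tcp_rmem/tcp_wmem are replaced above by a per-socket maximum derived from the memory pressure threshold sysctl_tcp_mem[1], capped at 4 MB. A standalone sketch of that arithmetic with made-up inputs (4 KiB pages, 98304 pages of TCP memory); the real values depend on how much RAM the machine has.

#include <stdio.h>

int main(void)
{
        unsigned long page_shift = 12;          /* 4 KiB pages (illustrative) */
        unsigned long tcp_mem_1  = 98304;       /* sysctl_tcp_mem[1], in pages (illustrative) */
        unsigned long limit      = tcp_mem_1 << (page_shift - 7);
        unsigned long max_share  = limit < 4UL * 1024 * 1024 ? limit : 4UL * 1024 * 1024;
        unsigned long rmem2      = max_share > 87380 ? max_share : 87380;
        unsigned long wmem2      = max_share > 64 * 1024 ? max_share : 64 * 1024;

        /* With these inputs: limit = 3145728, so rmem[2] = wmem[2] = 3145728 (3 MiB). */
        printf("limit=%lu max_share=%lu rmem[2]=%lu wmem[2]=%lu\n",
               limit, max_share, rmem2, wmem2);
        return 0;
}
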
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 0a29a24d9a72..a657ab5394c3 100644
--- a/net/netfilter/x_tables.c
+++ b/net/netfilter/x_tables.c
@@ -21,10 +21,12 @@
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/string.h> 22#include <linux/string.h>
23#include <linux/vmalloc.h> 23#include <linux/vmalloc.h>
24#include <linux/mutex.h>
24 25
25#include <linux/netfilter/x_tables.h> 26#include <linux/netfilter/x_tables.h>
26#include <linux/netfilter_arp.h> 27#include <linux/netfilter_arp.h>
27 28
29
28MODULE_LICENSE("GPL"); 30MODULE_LICENSE("GPL");
29MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); 31MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
30MODULE_DESCRIPTION("[ip,ip6,arp]_tables backend module"); 32MODULE_DESCRIPTION("[ip,ip6,arp]_tables backend module");
@@ -32,7 +34,7 @@ MODULE_DESCRIPTION("[ip,ip6,arp]_tables backend module");
32#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1)) 34#define SMP_ALIGN(x) (((x) + SMP_CACHE_BYTES-1) & ~(SMP_CACHE_BYTES-1))
33 35
34struct xt_af { 36struct xt_af {
35 struct semaphore mutex; 37 struct mutex mutex;
36 struct list_head match; 38 struct list_head match;
37 struct list_head target; 39 struct list_head target;
38 struct list_head tables; 40 struct list_head tables;
@@ -64,11 +66,11 @@ xt_register_target(struct xt_target *target)
64{ 66{
65 int ret, af = target->family; 67 int ret, af = target->family;
66 68
67 ret = down_interruptible(&xt[af].mutex); 69 ret = mutex_lock_interruptible(&xt[af].mutex);
68 if (ret != 0) 70 if (ret != 0)
69 return ret; 71 return ret;
70 list_add(&target->list, &xt[af].target); 72 list_add(&target->list, &xt[af].target);
71 up(&xt[af].mutex); 73 mutex_unlock(&xt[af].mutex);
72 return ret; 74 return ret;
73} 75}
74EXPORT_SYMBOL(xt_register_target); 76EXPORT_SYMBOL(xt_register_target);
@@ -78,9 +80,9 @@ xt_unregister_target(struct xt_target *target)
78{ 80{
79 int af = target->family; 81 int af = target->family;
80 82
81 down(&xt[af].mutex); 83 mutex_lock(&xt[af].mutex);
82 LIST_DELETE(&xt[af].target, target); 84 LIST_DELETE(&xt[af].target, target);
83 up(&xt[af].mutex); 85 mutex_unlock(&xt[af].mutex);
84} 86}
85EXPORT_SYMBOL(xt_unregister_target); 87EXPORT_SYMBOL(xt_unregister_target);
86 88
@@ -89,12 +91,12 @@ xt_register_match(struct xt_match *match)
89{ 91{
90 int ret, af = match->family; 92 int ret, af = match->family;
91 93
92 ret = down_interruptible(&xt[af].mutex); 94 ret = mutex_lock_interruptible(&xt[af].mutex);
93 if (ret != 0) 95 if (ret != 0)
94 return ret; 96 return ret;
95 97
96 list_add(&match->list, &xt[af].match); 98 list_add(&match->list, &xt[af].match);
97 up(&xt[af].mutex); 99 mutex_unlock(&xt[af].mutex);
98 100
99 return ret; 101 return ret;
100} 102}
@@ -105,9 +107,9 @@ xt_unregister_match(struct xt_match *match)
105{ 107{
106 int af = match->family; 108 int af = match->family;
107 109
108 down(&xt[af].mutex); 110 mutex_lock(&xt[af].mutex);
109 LIST_DELETE(&xt[af].match, match); 111 LIST_DELETE(&xt[af].match, match);
110 up(&xt[af].mutex); 112 mutex_unlock(&xt[af].mutex);
111} 113}
112EXPORT_SYMBOL(xt_unregister_match); 114EXPORT_SYMBOL(xt_unregister_match);
113 115
@@ -124,21 +126,21 @@ struct xt_match *xt_find_match(int af, const char *name, u8 revision)
124 struct xt_match *m; 126 struct xt_match *m;
125 int err = 0; 127 int err = 0;
126 128
127 if (down_interruptible(&xt[af].mutex) != 0) 129 if (mutex_lock_interruptible(&xt[af].mutex) != 0)
128 return ERR_PTR(-EINTR); 130 return ERR_PTR(-EINTR);
129 131
130 list_for_each_entry(m, &xt[af].match, list) { 132 list_for_each_entry(m, &xt[af].match, list) {
131 if (strcmp(m->name, name) == 0) { 133 if (strcmp(m->name, name) == 0) {
132 if (m->revision == revision) { 134 if (m->revision == revision) {
133 if (try_module_get(m->me)) { 135 if (try_module_get(m->me)) {
134 up(&xt[af].mutex); 136 mutex_unlock(&xt[af].mutex);
135 return m; 137 return m;
136 } 138 }
137 } else 139 } else
138 err = -EPROTOTYPE; /* Found something. */ 140 err = -EPROTOTYPE; /* Found something. */
139 } 141 }
140 } 142 }
141 up(&xt[af].mutex); 143 mutex_unlock(&xt[af].mutex);
142 return ERR_PTR(err); 144 return ERR_PTR(err);
143} 145}
144EXPORT_SYMBOL(xt_find_match); 146EXPORT_SYMBOL(xt_find_match);
@@ -149,21 +151,21 @@ struct xt_target *xt_find_target(int af, const char *name, u8 revision)
149 struct xt_target *t; 151 struct xt_target *t;
150 int err = 0; 152 int err = 0;
151 153
152 if (down_interruptible(&xt[af].mutex) != 0) 154 if (mutex_lock_interruptible(&xt[af].mutex) != 0)
153 return ERR_PTR(-EINTR); 155 return ERR_PTR(-EINTR);
154 156
155 list_for_each_entry(t, &xt[af].target, list) { 157 list_for_each_entry(t, &xt[af].target, list) {
156 if (strcmp(t->name, name) == 0) { 158 if (strcmp(t->name, name) == 0) {
157 if (t->revision == revision) { 159 if (t->revision == revision) {
158 if (try_module_get(t->me)) { 160 if (try_module_get(t->me)) {
159 up(&xt[af].mutex); 161 mutex_unlock(&xt[af].mutex);
160 return t; 162 return t;
161 } 163 }
162 } else 164 } else
163 err = -EPROTOTYPE; /* Found something. */ 165 err = -EPROTOTYPE; /* Found something. */
164 } 166 }
165 } 167 }
166 up(&xt[af].mutex); 168 mutex_unlock(&xt[af].mutex);
167 return ERR_PTR(err); 169 return ERR_PTR(err);
168} 170}
169EXPORT_SYMBOL(xt_find_target); 171EXPORT_SYMBOL(xt_find_target);
@@ -218,7 +220,7 @@ int xt_find_revision(int af, const char *name, u8 revision, int target,
218{ 220{
219 int have_rev, best = -1; 221 int have_rev, best = -1;
220 222
221 if (down_interruptible(&xt[af].mutex) != 0) { 223 if (mutex_lock_interruptible(&xt[af].mutex) != 0) {
222 *err = -EINTR; 224 *err = -EINTR;
223 return 1; 225 return 1;
224 } 226 }
@@ -226,7 +228,7 @@ int xt_find_revision(int af, const char *name, u8 revision, int target,
226 have_rev = target_revfn(af, name, revision, &best); 228 have_rev = target_revfn(af, name, revision, &best);
227 else 229 else
228 have_rev = match_revfn(af, name, revision, &best); 230 have_rev = match_revfn(af, name, revision, &best);
229 up(&xt[af].mutex); 231 mutex_unlock(&xt[af].mutex);
230 232
231 /* Nothing at all? Return 0 to try loading module. */ 233 /* Nothing at all? Return 0 to try loading module. */
232 if (best == -1) { 234 if (best == -1) {
@@ -352,20 +354,20 @@ struct xt_table *xt_find_table_lock(int af, const char *name)
352{ 354{
353 struct xt_table *t; 355 struct xt_table *t;
354 356
355 if (down_interruptible(&xt[af].mutex) != 0) 357 if (mutex_lock_interruptible(&xt[af].mutex) != 0)
356 return ERR_PTR(-EINTR); 358 return ERR_PTR(-EINTR);
357 359
358 list_for_each_entry(t, &xt[af].tables, list) 360 list_for_each_entry(t, &xt[af].tables, list)
359 if (strcmp(t->name, name) == 0 && try_module_get(t->me)) 361 if (strcmp(t->name, name) == 0 && try_module_get(t->me))
360 return t; 362 return t;
361 up(&xt[af].mutex); 363 mutex_unlock(&xt[af].mutex);
362 return NULL; 364 return NULL;
363} 365}
364EXPORT_SYMBOL_GPL(xt_find_table_lock); 366EXPORT_SYMBOL_GPL(xt_find_table_lock);
365 367
366void xt_table_unlock(struct xt_table *table) 368void xt_table_unlock(struct xt_table *table)
367{ 369{
368 up(&xt[table->af].mutex); 370 mutex_unlock(&xt[table->af].mutex);
369} 371}
370EXPORT_SYMBOL_GPL(xt_table_unlock); 372EXPORT_SYMBOL_GPL(xt_table_unlock);
371 373
@@ -405,7 +407,7 @@ int xt_register_table(struct xt_table *table,
405 int ret; 407 int ret;
406 struct xt_table_info *private; 408 struct xt_table_info *private;
407 409
408 ret = down_interruptible(&xt[table->af].mutex); 410 ret = mutex_lock_interruptible(&xt[table->af].mutex);
409 if (ret != 0) 411 if (ret != 0)
410 return ret; 412 return ret;
411 413
@@ -431,7 +433,7 @@ int xt_register_table(struct xt_table *table,
431 433
432 ret = 0; 434 ret = 0;
433 unlock: 435 unlock:
434 up(&xt[table->af].mutex); 436 mutex_unlock(&xt[table->af].mutex);
435 return ret; 437 return ret;
436} 438}
437EXPORT_SYMBOL_GPL(xt_register_table); 439EXPORT_SYMBOL_GPL(xt_register_table);
@@ -440,10 +442,10 @@ void *xt_unregister_table(struct xt_table *table)
440{ 442{
441 struct xt_table_info *private; 443 struct xt_table_info *private;
442 444
443 down(&xt[table->af].mutex); 445 mutex_lock(&xt[table->af].mutex);
444 private = table->private; 446 private = table->private;
445 LIST_DELETE(&xt[table->af].tables, table); 447 LIST_DELETE(&xt[table->af].tables, table);
446 up(&xt[table->af].mutex); 448 mutex_unlock(&xt[table->af].mutex);
447 449
448 return private; 450 return private;
449} 451}
@@ -507,7 +509,7 @@ static void *xt_tgt_seq_start(struct seq_file *seq, loff_t *pos)
507 if (!list) 509 if (!list)
508 return NULL; 510 return NULL;
509 511
510 if (down_interruptible(&xt[af].mutex) != 0) 512 if (mutex_lock_interruptible(&xt[af].mutex) != 0)
511 return NULL; 513 return NULL;
512 514
513 return xt_get_idx(list, seq, *pos); 515 return xt_get_idx(list, seq, *pos);
@@ -536,7 +538,7 @@ static void xt_tgt_seq_stop(struct seq_file *seq, void *v)
536 struct proc_dir_entry *pde = seq->private; 538 struct proc_dir_entry *pde = seq->private;
537 u_int16_t af = (unsigned long)pde->data & 0xffff; 539 u_int16_t af = (unsigned long)pde->data & 0xffff;
538 540
539 up(&xt[af].mutex); 541 mutex_unlock(&xt[af].mutex);
540} 542}
541 543
542static int xt_name_seq_show(struct seq_file *seq, void *v) 544static int xt_name_seq_show(struct seq_file *seq, void *v)
@@ -668,7 +670,7 @@ static int __init xt_init(void)
668 return -ENOMEM; 670 return -ENOMEM;
669 671
670 for (i = 0; i < NPROTO; i++) { 672 for (i = 0; i < NPROTO; i++) {
671 init_MUTEX(&xt[i].mutex); 673 mutex_init(&xt[i].mutex);
672 INIT_LIST_HEAD(&xt[i].target); 674 INIT_LIST_HEAD(&xt[i].target);
673 INIT_LIST_HEAD(&xt[i].match); 675 INIT_LIST_HEAD(&xt[i].match);
674 INIT_LIST_HEAD(&xt[i].tables); 676 INIT_LIST_HEAD(&xt[i].tables);
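
Every x_tables.c hunk follows one pattern: a semaphore used purely for mutual exclusion becomes a mutex, with down()/up() turning into mutex_lock()/mutex_unlock(), down_interruptible() into mutex_lock_interruptible() and init_MUTEX() into mutex_init(). A condensed kernel-style sketch of that pattern with invented names, not additional patch code.

#include <linux/list.h>
#include <linux/mutex.h>

struct xt_af_example {
        struct mutex mutex;             /* was: struct semaphore mutex */
        struct list_head match;
};

static int example_register_match(struct xt_af_example *af, struct list_head *entry)
{
        int ret;

        ret = mutex_lock_interruptible(&af->mutex);     /* was: down_interruptible() */
        if (ret != 0)
                return ret;                             /* interrupted by a signal */
        list_add(entry, &af->match);
        mutex_unlock(&af->mutex);                       /* was: up() */
        return 0;
}
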
diff --git a/net/rxrpc/main.c b/net/rxrpc/main.c
index 36fdcbcd80d1..48cbd065bb45 100644
--- a/net/rxrpc/main.c
+++ b/net/rxrpc/main.c
@@ -79,8 +79,8 @@ static int __init rxrpc_initialise(void)
79 error_sysctl: 79 error_sysctl:
80#ifdef CONFIG_SYSCTL 80#ifdef CONFIG_SYSCTL
81 rxrpc_sysctl_cleanup(); 81 rxrpc_sysctl_cleanup();
82#endif
83 error_proc: 82 error_proc:
83#endif
84#ifdef CONFIG_PROC_FS 84#ifdef CONFIG_PROC_FS
85 rxrpc_proc_cleanup(); 85 rxrpc_proc_cleanup();
86#endif 86#endif
diff --git a/net/sctp/input.c b/net/sctp/input.c
index cb78b50868ee..d117ebc75cf8 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -127,7 +127,6 @@ int sctp_rcv(struct sk_buff *skb)
127 union sctp_addr dest; 127 union sctp_addr dest;
128 int family; 128 int family;
129 struct sctp_af *af; 129 struct sctp_af *af;
130 int ret = 0;
131 130
132 if (skb->pkt_type!=PACKET_HOST) 131 if (skb->pkt_type!=PACKET_HOST)
133 goto discard_it; 132 goto discard_it;
@@ -227,16 +226,13 @@ int sctp_rcv(struct sk_buff *skb)
227 goto discard_release; 226 goto discard_release;
228 nf_reset(skb); 227 nf_reset(skb);
229 228
230 ret = sk_filter(sk, skb, 1); 229 if (sk_filter(sk, skb, 1))
231 if (ret)
232 goto discard_release; 230 goto discard_release;
233 231
234 /* Create an SCTP packet structure. */ 232 /* Create an SCTP packet structure. */
235 chunk = sctp_chunkify(skb, asoc, sk); 233 chunk = sctp_chunkify(skb, asoc, sk);
236 if (!chunk) { 234 if (!chunk)
237 ret = -ENOMEM;
238 goto discard_release; 235 goto discard_release;
239 }
240 SCTP_INPUT_CB(skb)->chunk = chunk; 236 SCTP_INPUT_CB(skb)->chunk = chunk;
241 237
242 /* Remember what endpoint is to handle this packet. */ 238 /* Remember what endpoint is to handle this packet. */
@@ -277,11 +273,11 @@ int sctp_rcv(struct sk_buff *skb)
277 sctp_bh_unlock_sock(sk); 273 sctp_bh_unlock_sock(sk);
278 sock_put(sk); 274 sock_put(sk);
279 275
280 return ret; 276 return 0;
281 277
282discard_it: 278discard_it:
283 kfree_skb(skb); 279 kfree_skb(skb);
284 return ret; 280 return 0;
285 281
286discard_release: 282discard_release:
287 /* Release any structures we may be holding. */ 283 /* Release any structures we may be holding. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 0ea947eb6813..b6e4b89539b3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -4894,6 +4894,8 @@ unsigned int sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
4894 /* Is there any exceptional events? */ 4894 /* Is there any exceptional events? */
4895 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) 4895 if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
4896 mask |= POLLERR; 4896 mask |= POLLERR;
4897 if (sk->sk_shutdown & RCV_SHUTDOWN)
4898 mask |= POLLRDHUP;
4897 if (sk->sk_shutdown == SHUTDOWN_MASK) 4899 if (sk->sk_shutdown == SHUTDOWN_MASK)
4898 mask |= POLLHUP; 4900 mask |= POLLHUP;
4899 4901
diff --git a/net/socket.c b/net/socket.c
index e2d5bae994de..5211ba270375 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -319,7 +319,8 @@ static int init_inodecache(void)
319{ 319{
320 sock_inode_cachep = kmem_cache_create("sock_inode_cache", 320 sock_inode_cachep = kmem_cache_create("sock_inode_cache",
321 sizeof(struct socket_alloc), 321 sizeof(struct socket_alloc),
322 0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, 322 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
323 SLAB_MEM_SPREAD),
323 init_once, NULL); 324 init_once, NULL);
324 if (sock_inode_cachep == NULL) 325 if (sock_inode_cachep == NULL)
325 return -ENOMEM; 326 return -ENOMEM;
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 8d6f1a176b15..55163af3dcaf 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -64,14 +64,26 @@ rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt)
64 struct rpc_authops *ops; 64 struct rpc_authops *ops;
65 u32 flavor = pseudoflavor_to_flavor(pseudoflavor); 65 u32 flavor = pseudoflavor_to_flavor(pseudoflavor);
66 66
67 if (flavor >= RPC_AUTH_MAXFLAVOR || !(ops = auth_flavors[flavor])) 67 auth = ERR_PTR(-EINVAL);
68 return ERR_PTR(-EINVAL); 68 if (flavor >= RPC_AUTH_MAXFLAVOR)
69 goto out;
70
71 /* FIXME - auth_flavors[] really needs an rw lock,
72 * and module refcounting. */
73#ifdef CONFIG_KMOD
74 if ((ops = auth_flavors[flavor]) == NULL)
75 request_module("rpc-auth-%u", flavor);
76#endif
77 if ((ops = auth_flavors[flavor]) == NULL)
78 goto out;
69 auth = ops->create(clnt, pseudoflavor); 79 auth = ops->create(clnt, pseudoflavor);
70 if (IS_ERR(auth)) 80 if (IS_ERR(auth))
71 return auth; 81 return auth;
72 if (clnt->cl_auth) 82 if (clnt->cl_auth)
73 rpcauth_destroy(clnt->cl_auth); 83 rpcauth_destroy(clnt->cl_auth);
74 clnt->cl_auth = auth; 84 clnt->cl_auth = auth;
85
86out:
75 return auth; 87 return auth;
76} 88}
77 89
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index bb46efd92e57..900ef31f5a0e 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -721,6 +721,8 @@ gss_destroy(struct rpc_auth *auth)
721 721
722 gss_auth = container_of(auth, struct gss_auth, rpc_auth); 722 gss_auth = container_of(auth, struct gss_auth, rpc_auth);
723 rpc_unlink(gss_auth->path); 723 rpc_unlink(gss_auth->path);
724 dput(gss_auth->dentry);
725 gss_auth->dentry = NULL;
724 gss_mech_put(gss_auth->mech); 726 gss_mech_put(gss_auth->mech);
725 727
726 rpcauth_free_credcache(auth); 728 rpcauth_free_credcache(auth);
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index d0dfdfd5e79e..f43311221a72 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -70,15 +70,19 @@
70# define RPCDBG_FACILITY RPCDBG_AUTH 70# define RPCDBG_FACILITY RPCDBG_AUTH
71#endif 71#endif
72 72
73spinlock_t krb5_seq_lock = SPIN_LOCK_UNLOCKED;
74
73u32 75u32
74gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text, 76gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
75 struct xdr_netobj *token) 77 struct xdr_netobj *token)
76{ 78{
77 struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; 79 struct krb5_ctx *ctx = gss_ctx->internal_ctx_id;
78 s32 checksum_type; 80 s32 checksum_type;
79 struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; 81 char cksumdata[16];
82 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
80 unsigned char *ptr, *krb5_hdr, *msg_start; 83 unsigned char *ptr, *krb5_hdr, *msg_start;
81 s32 now; 84 s32 now;
85 u32 seq_send;
82 86
83 dprintk("RPC: gss_krb5_seal\n"); 87 dprintk("RPC: gss_krb5_seal\n");
84 88
@@ -133,16 +137,15 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
133 BUG(); 137 BUG();
134 } 138 }
135 139
136 kfree(md5cksum.data); 140 spin_lock(&krb5_seq_lock);
141 seq_send = ctx->seq_send++;
142 spin_unlock(&krb5_seq_lock);
137 143
138 if ((krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff, 144 if ((krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
139 ctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))) 145 seq_send, krb5_hdr + 16, krb5_hdr + 8)))
140 goto out_err; 146 goto out_err;
141 147
142 ctx->seq_send++;
143
144 return ((ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE); 148 return ((ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
145out_err: 149out_err:
146 kfree(md5cksum.data);
147 return GSS_S_FAILURE; 150 return GSS_S_FAILURE;
148} 151}
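
Here and in gss_krb5_wrap.c below, the new global krb5_seq_lock serialises the post-increment of ctx->seq_send so concurrent signing requests cannot reuse a sequence number. A kernel-style sketch of that locking pattern with invented names (DEFINE_SPINLOCK() being the declarative equivalent of the SPIN_LOCK_UNLOCKED initialiser above); not additional patch code.

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_seq_lock);

/* Hand out the next send sequence number without races between callers. */
static u32 next_seq_send(u32 *seq_send)
{
        u32 seq;

        spin_lock(&example_seq_lock);
        seq = (*seq_send)++;
        spin_unlock(&example_seq_lock);
        return seq;
}
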
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index db055fd7d778..0828cf64100f 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -79,7 +79,8 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
79 int signalg; 79 int signalg;
80 int sealalg; 80 int sealalg;
81 s32 checksum_type; 81 s32 checksum_type;
82 struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; 82 char cksumdata[16];
83 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
83 s32 now; 84 s32 now;
84 int direction; 85 int direction;
85 s32 seqnum; 86 s32 seqnum;
@@ -176,6 +177,5 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
176 177
177 ret = GSS_S_COMPLETE; 178 ret = GSS_S_COMPLETE;
178out: 179out:
179 kfree(md5cksum.data);
180 return ret; 180 return ret;
181} 181}
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index af777cf9f251..89d1f3e14128 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -121,12 +121,14 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
121{ 121{
122 struct krb5_ctx *kctx = ctx->internal_ctx_id; 122 struct krb5_ctx *kctx = ctx->internal_ctx_id;
123 s32 checksum_type; 123 s32 checksum_type;
124 struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; 124 char cksumdata[16];
125 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
125 int blocksize = 0, plainlen; 126 int blocksize = 0, plainlen;
126 unsigned char *ptr, *krb5_hdr, *msg_start; 127 unsigned char *ptr, *krb5_hdr, *msg_start;
127 s32 now; 128 s32 now;
128 int headlen; 129 int headlen;
129 struct page **tmp_pages; 130 struct page **tmp_pages;
131 u32 seq_send;
130 132
131 dprintk("RPC: gss_wrap_kerberos\n"); 133 dprintk("RPC: gss_wrap_kerberos\n");
132 134
@@ -205,23 +207,22 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
205 BUG(); 207 BUG();
206 } 208 }
207 209
208 kfree(md5cksum.data); 210 spin_lock(&krb5_seq_lock);
211 seq_send = kctx->seq_send++;
212 spin_unlock(&krb5_seq_lock);
209 213
210 /* XXX would probably be more efficient to compute checksum 214 /* XXX would probably be more efficient to compute checksum
211 * and encrypt at the same time: */ 215 * and encrypt at the same time: */
212 if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff, 216 if ((krb5_make_seq_num(kctx->seq, kctx->initiate ? 0 : 0xff,
213 kctx->seq_send, krb5_hdr + 16, krb5_hdr + 8))) 217 seq_send, krb5_hdr + 16, krb5_hdr + 8)))
214 goto out_err; 218 goto out_err;
215 219
216 if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize, 220 if (gss_encrypt_xdr_buf(kctx->enc, buf, offset + headlen - blocksize,
217 pages)) 221 pages))
218 goto out_err; 222 goto out_err;
219 223
220 kctx->seq_send++;
221
222 return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE); 224 return ((kctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE);
223out_err: 225out_err:
224 if (md5cksum.data) kfree(md5cksum.data);
225 return GSS_S_FAILURE; 226 return GSS_S_FAILURE;
226} 227}
227 228
@@ -232,7 +233,8 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
232 int signalg; 233 int signalg;
233 int sealalg; 234 int sealalg;
234 s32 checksum_type; 235 s32 checksum_type;
235 struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; 236 char cksumdata[16];
237 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
236 s32 now; 238 s32 now;
237 int direction; 239 int direction;
238 s32 seqnum; 240 s32 seqnum;
@@ -358,6 +360,5 @@ gss_unwrap_kerberos(struct gss_ctx *ctx, int offset, struct xdr_buf *buf)
358 360
359 ret = GSS_S_COMPLETE; 361 ret = GSS_S_COMPLETE;
360out: 362out:
361 if (md5cksum.data) kfree(md5cksum.data);
362 return ret; 363 return ret;
363} 364}
diff --git a/net/sunrpc/auth_gss/gss_spkm3_mech.c b/net/sunrpc/auth_gss/gss_spkm3_mech.c
index 58400807d4df..5bf11ccba7cd 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_mech.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_mech.c
@@ -102,6 +102,12 @@ get_key(const void *p, const void *end, struct crypto_tfm **res, int *resalg)
102 alg_mode = CRYPTO_TFM_MODE_CBC; 102 alg_mode = CRYPTO_TFM_MODE_CBC;
103 setkey = 1; 103 setkey = 1;
104 break; 104 break;
105 case NID_cast5_cbc:
106 /* XXXX here in name only, not used */
107 alg_name = "cast5";
108 alg_mode = CRYPTO_TFM_MODE_CBC;
109 setkey = 0; /* XXX will need to set to 1 */
110 break;
105 case NID_md5: 111 case NID_md5:
106 if (key.len == 0) { 112 if (key.len == 0) {
107 dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n"); 113 dprintk("RPC: SPKM3 get_key: NID_md5 zero Key length\n");
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index 86fbf7c3e39c..18c7862bc234 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -57,7 +57,8 @@ spkm3_make_token(struct spkm3_ctx *ctx,
57{ 57{
58 s32 checksum_type; 58 s32 checksum_type;
59 char tokhdrbuf[25]; 59 char tokhdrbuf[25];
60 struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; 60 char cksumdata[16];
61 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
61 struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf}; 62 struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf};
62 int tokenlen = 0; 63 int tokenlen = 0;
63 unsigned char *ptr; 64 unsigned char *ptr;
@@ -115,13 +116,11 @@ spkm3_make_token(struct spkm3_ctx *ctx,
115 dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK not supported\n"); 116 dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK not supported\n");
116 goto out_err; 117 goto out_err;
117 } 118 }
118 kfree(md5cksum.data);
119 119
120 /* XXX need to implement sequence numbers, and ctx->expired */ 120 /* XXX need to implement sequence numbers, and ctx->expired */
121 121
122 return GSS_S_COMPLETE; 122 return GSS_S_COMPLETE;
123out_err: 123out_err:
124 kfree(md5cksum.data);
125 token->data = NULL; 124 token->data = NULL;
126 token->len = 0; 125 token->len = 0;
127 return GSS_S_FAILURE; 126 return GSS_S_FAILURE;
diff --git a/net/sunrpc/auth_gss/gss_spkm3_unseal.c b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
index 96851b0ba1ba..8537f581ef9b 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_unseal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_unseal.c
@@ -56,7 +56,8 @@ spkm3_read_token(struct spkm3_ctx *ctx,
56{ 56{
57 s32 code; 57 s32 code;
58 struct xdr_netobj wire_cksum = {.len =0, .data = NULL}; 58 struct xdr_netobj wire_cksum = {.len =0, .data = NULL};
59 struct xdr_netobj md5cksum = {.len = 0, .data = NULL}; 59 char cksumdata[16];
60 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
60 unsigned char *ptr = (unsigned char *)read_token->data; 61 unsigned char *ptr = (unsigned char *)read_token->data;
61 unsigned char *cksum; 62 unsigned char *cksum;
62 int bodysize, md5elen; 63 int bodysize, md5elen;
@@ -120,7 +121,6 @@ spkm3_read_token(struct spkm3_ctx *ctx,
120 /* XXX: need to add expiration and sequencing */ 121 /* XXX: need to add expiration and sequencing */
121 ret = GSS_S_COMPLETE; 122 ret = GSS_S_COMPLETE;
122out: 123out:
123 kfree(md5cksum.data);
124 kfree(wire_cksum.data); 124 kfree(wire_cksum.data);
125 return ret; 125 return ret;
126} 126}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index d78479782045..aa8965e9d307 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -28,12 +28,11 @@
28#include <linux/mm.h> 28#include <linux/mm.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/utsname.h> 30#include <linux/utsname.h>
31#include <linux/workqueue.h>
31 32
32#include <linux/sunrpc/clnt.h> 33#include <linux/sunrpc/clnt.h>
33#include <linux/workqueue.h>
34#include <linux/sunrpc/rpc_pipe_fs.h> 34#include <linux/sunrpc/rpc_pipe_fs.h>
35 35#include <linux/sunrpc/metrics.h>
36#include <linux/nfs.h>
37 36
38 37
39#define RPC_SLACK_SPACE (1024) /* total overkill */ 38#define RPC_SLACK_SPACE (1024) /* total overkill */
@@ -71,8 +70,15 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
71 static uint32_t clntid; 70 static uint32_t clntid;
72 int error; 71 int error;
73 72
73 clnt->cl_vfsmnt = ERR_PTR(-ENOENT);
74 clnt->cl_dentry = ERR_PTR(-ENOENT);
74 if (dir_name == NULL) 75 if (dir_name == NULL)
75 return 0; 76 return 0;
77
78 clnt->cl_vfsmnt = rpc_get_mount();
79 if (IS_ERR(clnt->cl_vfsmnt))
80 return PTR_ERR(clnt->cl_vfsmnt);
81
76 for (;;) { 82 for (;;) {
77 snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname), 83 snprintf(clnt->cl_pathname, sizeof(clnt->cl_pathname),
78 "%s/clnt%x", dir_name, 84 "%s/clnt%x", dir_name,
@@ -85,6 +91,7 @@ rpc_setup_pipedir(struct rpc_clnt *clnt, char *dir_name)
85 if (error != -EEXIST) { 91 if (error != -EEXIST) {
86 printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n", 92 printk(KERN_INFO "RPC: Couldn't create pipefs entry %s, error %d\n",
87 clnt->cl_pathname, error); 93 clnt->cl_pathname, error);
94 rpc_put_mount();
88 return error; 95 return error;
89 } 96 }
90 } 97 }
@@ -147,6 +154,7 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
147 clnt->cl_vers = version->number; 154 clnt->cl_vers = version->number;
148 clnt->cl_prot = xprt->prot; 155 clnt->cl_prot = xprt->prot;
149 clnt->cl_stats = program->stats; 156 clnt->cl_stats = program->stats;
157 clnt->cl_metrics = rpc_alloc_iostats(clnt);
150 rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait"); 158 rpc_init_wait_queue(&clnt->cl_pmap_default.pm_bindwait, "bindwait");
151 159
152 if (!clnt->cl_port) 160 if (!clnt->cl_port)
@@ -175,7 +183,11 @@ rpc_new_client(struct rpc_xprt *xprt, char *servname,
175 return clnt; 183 return clnt;
176 184
177out_no_auth: 185out_no_auth:
178 rpc_rmdir(clnt->cl_pathname); 186 if (!IS_ERR(clnt->cl_dentry)) {
187 rpc_rmdir(clnt->cl_pathname);
188 dput(clnt->cl_dentry);
189 rpc_put_mount();
190 }
179out_no_path: 191out_no_path:
180 if (clnt->cl_server != clnt->cl_inline_name) 192 if (clnt->cl_server != clnt->cl_inline_name)
181 kfree(clnt->cl_server); 193 kfree(clnt->cl_server);
@@ -240,11 +252,15 @@ rpc_clone_client(struct rpc_clnt *clnt)
240 new->cl_autobind = 0; 252 new->cl_autobind = 0;
241 new->cl_oneshot = 0; 253 new->cl_oneshot = 0;
242 new->cl_dead = 0; 254 new->cl_dead = 0;
255 if (!IS_ERR(new->cl_dentry)) {
256 dget(new->cl_dentry);
257 rpc_get_mount();
258 }
243 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval); 259 rpc_init_rtt(&new->cl_rtt_default, clnt->cl_xprt->timeout.to_initval);
244 if (new->cl_auth) 260 if (new->cl_auth)
245 atomic_inc(&new->cl_auth->au_count); 261 atomic_inc(&new->cl_auth->au_count);
246 new->cl_pmap = &new->cl_pmap_default; 262 new->cl_pmap = &new->cl_pmap_default;
247 rpc_init_wait_queue(&new->cl_pmap_default.pm_bindwait, "bindwait"); 263 new->cl_metrics = rpc_alloc_iostats(clnt);
248 return new; 264 return new;
249out_no_clnt: 265out_no_clnt:
250 printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__); 266 printk(KERN_INFO "RPC: out of memory in %s\n", __FUNCTION__);
@@ -314,6 +330,12 @@ rpc_destroy_client(struct rpc_clnt *clnt)
314 if (clnt->cl_server != clnt->cl_inline_name) 330 if (clnt->cl_server != clnt->cl_inline_name)
315 kfree(clnt->cl_server); 331 kfree(clnt->cl_server);
316out_free: 332out_free:
333 rpc_free_iostats(clnt->cl_metrics);
334 clnt->cl_metrics = NULL;
335 if (!IS_ERR(clnt->cl_dentry)) {
336 dput(clnt->cl_dentry);
337 rpc_put_mount();
338 }
317 kfree(clnt); 339 kfree(clnt);
318 return 0; 340 return 0;
319} 341}
@@ -473,15 +495,16 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
473 int status; 495 int status;
474 496
475 /* If this client is slain all further I/O fails */ 497 /* If this client is slain all further I/O fails */
498 status = -EIO;
476 if (clnt->cl_dead) 499 if (clnt->cl_dead)
477 return -EIO; 500 goto out_release;
478 501
479 flags |= RPC_TASK_ASYNC; 502 flags |= RPC_TASK_ASYNC;
480 503
481 /* Create/initialize a new RPC task */ 504 /* Create/initialize a new RPC task */
482 status = -ENOMEM; 505 status = -ENOMEM;
483 if (!(task = rpc_new_task(clnt, flags, tk_ops, data))) 506 if (!(task = rpc_new_task(clnt, flags, tk_ops, data)))
484 goto out; 507 goto out_release;
485 508
486 /* Mask signals on GSS_AUTH upcalls */ 509 /* Mask signals on GSS_AUTH upcalls */
487 rpc_task_sigmask(task, &oldset); 510 rpc_task_sigmask(task, &oldset);
@@ -496,7 +519,10 @@ rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
496 rpc_release_task(task); 519 rpc_release_task(task);
497 520
498 rpc_restore_sigmask(&oldset); 521 rpc_restore_sigmask(&oldset);
499out: 522 return status;
523out_release:
524 if (tk_ops->rpc_release != NULL)
525 tk_ops->rpc_release(data);
500 return status; 526 return status;
501} 527}
502 528
@@ -993,6 +1019,8 @@ call_timeout(struct rpc_task *task)
993 } 1019 }
994 1020
995 dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid); 1021 dprintk("RPC: %4d call_timeout (major)\n", task->tk_pid);
1022 task->tk_timeouts++;
1023
996 if (RPC_IS_SOFT(task)) { 1024 if (RPC_IS_SOFT(task)) {
997 printk(KERN_NOTICE "%s: server %s not responding, timed out\n", 1025 printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
998 clnt->cl_protname, clnt->cl_server); 1026 clnt->cl_protname, clnt->cl_server);
@@ -1045,6 +1073,11 @@ call_decode(struct rpc_task *task)
1045 return; 1073 return;
1046 } 1074 }
1047 1075
1076 /*
1077 * Ensure that we see all writes made by xprt_complete_rqst()
1078 * before it changed req->rq_received.
1079 */
1080 smp_rmb();
1048 req->rq_rcv_buf.len = req->rq_private_buf.len; 1081 req->rq_rcv_buf.len = req->rq_private_buf.len;
1049 1082
1050 /* Check that the softirq receive buffer is valid */ 1083 /* Check that the softirq receive buffer is valid */
@@ -1194,8 +1227,8 @@ call_verify(struct rpc_task *task)
1194 task->tk_action = call_bind; 1227 task->tk_action = call_bind;
1195 goto out_retry; 1228 goto out_retry;
1196 case RPC_AUTH_TOOWEAK: 1229 case RPC_AUTH_TOOWEAK:
1197 printk(KERN_NOTICE "call_verify: server requires stronger " 1230 printk(KERN_NOTICE "call_verify: server %s requires stronger "
1198 "authentication.\n"); 1231 "authentication.\n", task->tk_client->cl_server);
1199 break; 1232 break;
1200 default: 1233 default:
1201 printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n); 1234 printk(KERN_WARNING "call_verify: unknown auth error: %x\n", n);
diff --git a/net/sunrpc/pmap_clnt.c b/net/sunrpc/pmap_clnt.c
index 8139ce68e915..d25b054ec921 100644
--- a/net/sunrpc/pmap_clnt.c
+++ b/net/sunrpc/pmap_clnt.c
@@ -82,6 +82,7 @@ rpc_getport(struct rpc_task *task, struct rpc_clnt *clnt)
82 rpc_call_setup(child, &msg, 0); 82 rpc_call_setup(child, &msg, 0);
83 83
84 /* ... and run the child task */ 84 /* ... and run the child task */
85 task->tk_xprt->stat.bind_count++;
85 rpc_run_child(task, child, pmap_getport_done); 86 rpc_run_child(task, child, pmap_getport_done);
86 return; 87 return;
87 88
@@ -103,6 +104,11 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
103 .pm_prot = prot, 104 .pm_prot = prot,
104 .pm_port = 0 105 .pm_port = 0
105 }; 106 };
107 struct rpc_message msg = {
108 .rpc_proc = &pmap_procedures[PMAP_GETPORT],
109 .rpc_argp = &map,
110 .rpc_resp = &map.pm_port,
111 };
106 struct rpc_clnt *pmap_clnt; 112 struct rpc_clnt *pmap_clnt;
107 char hostname[32]; 113 char hostname[32];
108 int status; 114 int status;
@@ -116,7 +122,7 @@ rpc_getport_external(struct sockaddr_in *sin, __u32 prog, __u32 vers, int prot)
116 return PTR_ERR(pmap_clnt); 122 return PTR_ERR(pmap_clnt);
117 123
118 /* Setup the call info struct */ 124 /* Setup the call info struct */
119 status = rpc_call(pmap_clnt, PMAP_GETPORT, &map, &map.pm_port, 0); 125 status = rpc_call_sync(pmap_clnt, &msg, 0);
120 126
121 if (status >= 0) { 127 if (status >= 0) {
122 if (map.pm_port != 0) 128 if (map.pm_port != 0)
@@ -161,16 +167,27 @@ pmap_getport_done(struct rpc_task *task)
161int 167int
162rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay) 168rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
163{ 169{
164 struct sockaddr_in sin; 170 struct sockaddr_in sin = {
165 struct rpc_portmap map; 171 .sin_family = AF_INET,
172 .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
173 };
174 struct rpc_portmap map = {
175 .pm_prog = prog,
176 .pm_vers = vers,
177 .pm_prot = prot,
178 .pm_port = port,
179 };
180 struct rpc_message msg = {
181 .rpc_proc = &pmap_procedures[port ? PMAP_SET : PMAP_UNSET],
182 .rpc_argp = &map,
183 .rpc_resp = okay,
184 };
166 struct rpc_clnt *pmap_clnt; 185 struct rpc_clnt *pmap_clnt;
167 int error = 0; 186 int error = 0;
168 187
169 dprintk("RPC: registering (%d, %d, %d, %d) with portmapper.\n", 188 dprintk("RPC: registering (%d, %d, %d, %d) with portmapper.\n",
170 prog, vers, prot, port); 189 prog, vers, prot, port);
171 190
172 sin.sin_family = AF_INET;
173 sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
174 pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1); 191 pmap_clnt = pmap_create("localhost", &sin, IPPROTO_UDP, 1);
175 if (IS_ERR(pmap_clnt)) { 192 if (IS_ERR(pmap_clnt)) {
176 error = PTR_ERR(pmap_clnt); 193 error = PTR_ERR(pmap_clnt);
@@ -178,13 +195,7 @@ rpc_register(u32 prog, u32 vers, int prot, unsigned short port, int *okay)
178 return error; 195 return error;
179 } 196 }
180 197
181 map.pm_prog = prog; 198 error = rpc_call_sync(pmap_clnt, &msg, 0);
182 map.pm_vers = vers;
183 map.pm_prot = prot;
184 map.pm_port = port;
185
186 error = rpc_call(pmap_clnt, port? PMAP_SET : PMAP_UNSET,
187 &map, okay, 0);
188 199
189 if (error < 0) { 200 if (error < 0) {
190 printk(KERN_WARNING 201 printk(KERN_WARNING
@@ -260,6 +271,8 @@ static struct rpc_procinfo pmap_procedures[] = {
260 .p_decode = (kxdrproc_t) xdr_decode_bool, 271 .p_decode = (kxdrproc_t) xdr_decode_bool,
261 .p_bufsiz = 4, 272 .p_bufsiz = 4,
262 .p_count = 1, 273 .p_count = 1,
274 .p_statidx = PMAP_SET,
275 .p_name = "SET",
263 }, 276 },
264[PMAP_UNSET] = { 277[PMAP_UNSET] = {
265 .p_proc = PMAP_UNSET, 278 .p_proc = PMAP_UNSET,
@@ -267,6 +280,8 @@ static struct rpc_procinfo pmap_procedures[] = {
267 .p_decode = (kxdrproc_t) xdr_decode_bool, 280 .p_decode = (kxdrproc_t) xdr_decode_bool,
268 .p_bufsiz = 4, 281 .p_bufsiz = 4,
269 .p_count = 1, 282 .p_count = 1,
283 .p_statidx = PMAP_UNSET,
284 .p_name = "UNSET",
270 }, 285 },
271[PMAP_GETPORT] = { 286[PMAP_GETPORT] = {
272 .p_proc = PMAP_GETPORT, 287 .p_proc = PMAP_GETPORT,
@@ -274,6 +289,8 @@ static struct rpc_procinfo pmap_procedures[] = {
274 .p_decode = (kxdrproc_t) xdr_decode_port, 289 .p_decode = (kxdrproc_t) xdr_decode_port,
275 .p_bufsiz = 4, 290 .p_bufsiz = 4,
276 .p_count = 1, 291 .p_count = 1,
292 .p_statidx = PMAP_GETPORT,
293 .p_name = "GETPORT",
277 }, 294 },
278}; 295};
279 296
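
For readers tracking the conversion above: these pmap_clnt.c hunks replace the old rpc_call(clnt, proc, argp, resp, flags) wrapper with an explicit struct rpc_message handed to rpc_call_sync(). A minimal sketch of that calling convention, using only names visible in the hunks (an illustration, not additional patch content):

static int example_getport(struct rpc_clnt *pmap_clnt, struct rpc_portmap *map)
{
	struct rpc_message msg = {
		.rpc_proc = &pmap_procedures[PMAP_GETPORT],
		.rpc_argp = map,
		.rpc_resp = &map->pm_port,
	};

	/* the procedure now travels inside the message; rpc_call_sync()
	 * performs the same synchronous call the old wrapper did */
	return rpc_call_sync(pmap_clnt, &msg, 0);
}
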
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index a5c0c7b6e151..aa4158be9900 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -91,7 +91,8 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
91 res = 0; 91 res = 0;
92 } else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) { 92 } else if (rpci->flags & RPC_PIPE_WAIT_FOR_OPEN) {
93 if (list_empty(&rpci->pipe)) 93 if (list_empty(&rpci->pipe))
94 schedule_delayed_work(&rpci->queue_timeout, 94 queue_delayed_work(rpciod_workqueue,
95 &rpci->queue_timeout,
95 RPC_UPCALL_TIMEOUT); 96 RPC_UPCALL_TIMEOUT);
96 list_add_tail(&msg->list, &rpci->pipe); 97 list_add_tail(&msg->list, &rpci->pipe);
97 rpci->pipelen += msg->len; 98 rpci->pipelen += msg->len;
@@ -132,7 +133,7 @@ rpc_close_pipes(struct inode *inode)
132 if (ops->release_pipe) 133 if (ops->release_pipe)
133 ops->release_pipe(inode); 134 ops->release_pipe(inode);
134 cancel_delayed_work(&rpci->queue_timeout); 135 cancel_delayed_work(&rpci->queue_timeout);
135 flush_scheduled_work(); 136 flush_workqueue(rpciod_workqueue);
136 } 137 }
137 rpc_inode_setowner(inode, NULL); 138 rpc_inode_setowner(inode, NULL);
138 mutex_unlock(&inode->i_mutex); 139 mutex_unlock(&inode->i_mutex);
@@ -434,14 +435,17 @@ static struct rpc_filelist authfiles[] = {
434 }, 435 },
435}; 436};
436 437
437static int 438struct vfsmount *rpc_get_mount(void)
438rpc_get_mount(void)
439{ 439{
440 return simple_pin_fs("rpc_pipefs", &rpc_mount, &rpc_mount_count); 440 int err;
441
442 err = simple_pin_fs("rpc_pipefs", &rpc_mount, &rpc_mount_count);
443 if (err != 0)
444 return ERR_PTR(err);
445 return rpc_mount;
441} 446}
442 447
443static void 448void rpc_put_mount(void)
444rpc_put_mount(void)
445{ 449{
446 simple_release_fs(&rpc_mount, &rpc_mount_count); 450 simple_release_fs(&rpc_mount, &rpc_mount_count);
447} 451}
@@ -451,12 +455,13 @@ rpc_lookup_parent(char *path, struct nameidata *nd)
451{ 455{
452 if (path[0] == '\0') 456 if (path[0] == '\0')
453 return -ENOENT; 457 return -ENOENT;
454 if (rpc_get_mount()) { 458 nd->mnt = rpc_get_mount();
459 if (IS_ERR(nd->mnt)) {
455 printk(KERN_WARNING "%s: %s failed to mount " 460 printk(KERN_WARNING "%s: %s failed to mount "
456 "pseudofilesystem \n", __FILE__, __FUNCTION__); 461 "pseudofilesystem \n", __FILE__, __FUNCTION__);
457 return -ENODEV; 462 return PTR_ERR(nd->mnt);
458 } 463 }
459 nd->mnt = mntget(rpc_mount); 464 mntget(nd->mnt);
460 nd->dentry = dget(rpc_mount->mnt_root); 465 nd->dentry = dget(rpc_mount->mnt_root);
461 nd->last_type = LAST_ROOT; 466 nd->last_type = LAST_ROOT;
462 nd->flags = LOOKUP_PARENT; 467 nd->flags = LOOKUP_PARENT;
@@ -593,7 +598,6 @@ __rpc_mkdir(struct inode *dir, struct dentry *dentry)
593 d_instantiate(dentry, inode); 598 d_instantiate(dentry, inode);
594 dir->i_nlink++; 599 dir->i_nlink++;
595 inode_dir_notify(dir, DN_CREATE); 600 inode_dir_notify(dir, DN_CREATE);
596 rpc_get_mount();
597 return 0; 601 return 0;
598out_err: 602out_err:
599 printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n", 603 printk(KERN_WARNING "%s: %s failed to allocate inode for dentry %s\n",
@@ -614,7 +618,6 @@ __rpc_rmdir(struct inode *dir, struct dentry *dentry)
614 if (!error) { 618 if (!error) {
615 inode_dir_notify(dir, DN_DELETE); 619 inode_dir_notify(dir, DN_DELETE);
616 d_drop(dentry); 620 d_drop(dentry);
617 rpc_put_mount();
618 } 621 }
619 return 0; 622 return 0;
620} 623}
@@ -668,7 +671,7 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
668out: 671out:
669 mutex_unlock(&dir->i_mutex); 672 mutex_unlock(&dir->i_mutex);
670 rpc_release_path(&nd); 673 rpc_release_path(&nd);
671 return dentry; 674 return dget(dentry);
672err_depopulate: 675err_depopulate:
673 rpc_depopulate(dentry); 676 rpc_depopulate(dentry);
674 __rpc_rmdir(dir, dentry); 677 __rpc_rmdir(dir, dentry);
@@ -732,7 +735,7 @@ rpc_mkpipe(char *path, void *private, struct rpc_pipe_ops *ops, int flags)
732out: 735out:
733 mutex_unlock(&dir->i_mutex); 736 mutex_unlock(&dir->i_mutex);
734 rpc_release_path(&nd); 737 rpc_release_path(&nd);
735 return dentry; 738 return dget(dentry);
736err_dput: 739err_dput:
737 dput(dentry); 740 dput(dentry);
738 dentry = ERR_PTR(-ENOMEM); 741 dentry = ERR_PTR(-ENOMEM);
@@ -849,9 +852,10 @@ init_once(void * foo, kmem_cache_t * cachep, unsigned long flags)
849int register_rpc_pipefs(void) 852int register_rpc_pipefs(void)
850{ 853{
851 rpc_inode_cachep = kmem_cache_create("rpc_inode_cache", 854 rpc_inode_cachep = kmem_cache_create("rpc_inode_cache",
852 sizeof(struct rpc_inode), 855 sizeof(struct rpc_inode),
853 0, SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT, 856 0, (SLAB_HWCACHE_ALIGN|SLAB_RECLAIM_ACCOUNT|
854 init_once, NULL); 857 SLAB_MEM_SPREAD),
858 init_once, NULL);
855 if (!rpc_inode_cachep) 859 if (!rpc_inode_cachep)
856 return -ENOMEM; 860 return -ENOMEM;
857 register_filesystem(&rpc_pipe_fs_type); 861 register_filesystem(&rpc_pipe_fs_type);
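
rpc_get_mount() and rpc_put_mount() become exported helpers here: the getter pins rpc_pipefs and returns the vfsmount or an ERR_PTR-encoded error, as the rpc_lookup_parent() hunk demonstrates. A sketch of the expected caller pattern (illustrative only, assuming a caller that needs the mount temporarily):

	struct vfsmount *mnt = rpc_get_mount();

	if (IS_ERR(mnt))
		return PTR_ERR(mnt);
	/* ... work under rpc_pipefs via mnt ... */
	rpc_put_mount();
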
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index dff07795bd16..b9969b91a9f7 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -65,7 +65,7 @@ static LIST_HEAD(all_tasks);
65 */ 65 */
66static DEFINE_MUTEX(rpciod_mutex); 66static DEFINE_MUTEX(rpciod_mutex);
67static unsigned int rpciod_users; 67static unsigned int rpciod_users;
68static struct workqueue_struct *rpciod_workqueue; 68struct workqueue_struct *rpciod_workqueue;
69 69
70/* 70/*
71 * Spinlock for other critical sections of code. 71 * Spinlock for other critical sections of code.
@@ -182,6 +182,7 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *
182 else 182 else
183 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); 183 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
184 task->u.tk_wait.rpc_waitq = queue; 184 task->u.tk_wait.rpc_waitq = queue;
185 queue->qlen++;
185 rpc_set_queued(task); 186 rpc_set_queued(task);
186 187
187 dprintk("RPC: %4d added to queue %p \"%s\"\n", 188 dprintk("RPC: %4d added to queue %p \"%s\"\n",
@@ -216,6 +217,7 @@ static void __rpc_remove_wait_queue(struct rpc_task *task)
216 __rpc_remove_wait_queue_priority(task); 217 __rpc_remove_wait_queue_priority(task);
217 else 218 else
218 list_del(&task->u.tk_wait.list); 219 list_del(&task->u.tk_wait.list);
220 queue->qlen--;
219 dprintk("RPC: %4d removed from queue %p \"%s\"\n", 221 dprintk("RPC: %4d removed from queue %p \"%s\"\n",
220 task->tk_pid, queue, rpc_qname(queue)); 222 task->tk_pid, queue, rpc_qname(queue));
221} 223}
@@ -816,6 +818,9 @@ void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt, int flags, cons
816 818
817 BUG_ON(task->tk_ops == NULL); 819 BUG_ON(task->tk_ops == NULL);
818 820
821 /* starting timestamp */
822 task->tk_start = jiffies;
823
819 dprintk("RPC: %4d new task procpid %d\n", task->tk_pid, 824 dprintk("RPC: %4d new task procpid %d\n", task->tk_pid,
820 current->pid); 825 current->pid);
821} 826}
@@ -917,8 +922,11 @@ struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
917{ 922{
918 struct rpc_task *task; 923 struct rpc_task *task;
919 task = rpc_new_task(clnt, flags, ops, data); 924 task = rpc_new_task(clnt, flags, ops, data);
920 if (task == NULL) 925 if (task == NULL) {
926 if (ops->rpc_release != NULL)
927 ops->rpc_release(data);
921 return ERR_PTR(-ENOMEM); 928 return ERR_PTR(-ENOMEM);
929 }
922 atomic_inc(&task->tk_count); 930 atomic_inc(&task->tk_count);
923 rpc_execute(task); 931 rpc_execute(task);
924 return task; 932 return task;
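
Two details in these sched.c hunks matter later in the series: task->tk_start stamps task creation time (rpc_count_iostats() in stats.c measures queue and execute latency against it), and rpc_run_task() now disposes of the caller's callback data itself when task allocation fails. A sketch of that failure-path contract (illustrative; it assumes callers no longer free 'data' themselves on this error):

	task = rpc_new_task(clnt, flags, ops, data);
	if (task == NULL) {
		/* ownership of 'data' passes to the release callback */
		if (ops->rpc_release != NULL)
			ops->rpc_release(data);
		return ERR_PTR(-ENOMEM);
	}
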
diff --git a/net/sunrpc/stats.c b/net/sunrpc/stats.c
index 4979f226e285..790941e8af4d 100644
--- a/net/sunrpc/stats.c
+++ b/net/sunrpc/stats.c
@@ -21,6 +21,7 @@
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22#include <linux/sunrpc/clnt.h> 22#include <linux/sunrpc/clnt.h>
23#include <linux/sunrpc/svcsock.h> 23#include <linux/sunrpc/svcsock.h>
24#include <linux/sunrpc/metrics.h>
24 25
25#define RPCDBG_FACILITY RPCDBG_MISC 26#define RPCDBG_FACILITY RPCDBG_MISC
26 27
@@ -106,6 +107,120 @@ void svc_seq_show(struct seq_file *seq, const struct svc_stat *statp) {
106 } 107 }
107} 108}
108 109
110/**
111 * rpc_alloc_iostats - allocate an rpc_iostats structure
112 * @clnt: RPC program, version, and xprt
113 *
114 */
115struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
116{
117 unsigned int ops = clnt->cl_maxproc;
118 size_t size = ops * sizeof(struct rpc_iostats);
119 struct rpc_iostats *new;
120
121 new = kmalloc(size, GFP_KERNEL);
122 if (new)
123 		memset(new, 0, size);
124 return new;
125}
126EXPORT_SYMBOL(rpc_alloc_iostats);
127
128/**
129 * rpc_free_iostats - release an rpc_iostats structure
130 * @stats: doomed rpc_iostats structure
131 *
132 */
133void rpc_free_iostats(struct rpc_iostats *stats)
134{
135 kfree(stats);
136}
137EXPORT_SYMBOL(rpc_free_iostats);
138
139/**
140 * rpc_count_iostats - tally up per-task stats
141 * @task: completed rpc_task
142 *
143 * Relies on the caller for serialization.
144 */
145void rpc_count_iostats(struct rpc_task *task)
146{
147 struct rpc_rqst *req = task->tk_rqstp;
148 struct rpc_iostats *stats = task->tk_client->cl_metrics;
149 struct rpc_iostats *op_metrics;
150 long rtt, execute, queue;
151
152 if (!stats || !req)
153 return;
154 op_metrics = &stats[task->tk_msg.rpc_proc->p_statidx];
155
156 op_metrics->om_ops++;
157 op_metrics->om_ntrans += req->rq_ntrans;
158 op_metrics->om_timeouts += task->tk_timeouts;
159
160 op_metrics->om_bytes_sent += task->tk_bytes_sent;
161 op_metrics->om_bytes_recv += req->rq_received;
162
163 queue = (long)req->rq_xtime - task->tk_start;
164 if (queue < 0)
165 queue = -queue;
166 op_metrics->om_queue += queue;
167
168 rtt = task->tk_rtt;
169 if (rtt < 0)
170 rtt = -rtt;
171 op_metrics->om_rtt += rtt;
172
173 execute = (long)jiffies - task->tk_start;
174 if (execute < 0)
175 execute = -execute;
176 op_metrics->om_execute += execute;
177}
178
179void _print_name(struct seq_file *seq, unsigned int op, struct rpc_procinfo *procs)
180{
181 if (procs[op].p_name)
182 seq_printf(seq, "\t%12s: ", procs[op].p_name);
183 else if (op == 0)
184 seq_printf(seq, "\t NULL: ");
185 else
186 seq_printf(seq, "\t%12u: ", op);
187}
188
189#define MILLISECS_PER_JIFFY (1000 / HZ)
190
191void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt)
192{
193 struct rpc_iostats *stats = clnt->cl_metrics;
194 struct rpc_xprt *xprt = clnt->cl_xprt;
195 unsigned int op, maxproc = clnt->cl_maxproc;
196
197 if (!stats)
198 return;
199
200 seq_printf(seq, "\tRPC iostats version: %s ", RPC_IOSTATS_VERS);
201 seq_printf(seq, "p/v: %u/%u (%s)\n",
202 clnt->cl_prog, clnt->cl_vers, clnt->cl_protname);
203
204 if (xprt)
205 xprt->ops->print_stats(xprt, seq);
206
207 seq_printf(seq, "\tper-op statistics\n");
208 for (op = 0; op < maxproc; op++) {
209 struct rpc_iostats *metrics = &stats[op];
210 _print_name(seq, op, clnt->cl_procinfo);
211 seq_printf(seq, "%lu %lu %lu %Lu %Lu %Lu %Lu %Lu\n",
212 metrics->om_ops,
213 metrics->om_ntrans,
214 metrics->om_timeouts,
215 metrics->om_bytes_sent,
216 metrics->om_bytes_recv,
217 metrics->om_queue * MILLISECS_PER_JIFFY,
218 metrics->om_rtt * MILLISECS_PER_JIFFY,
219 metrics->om_execute * MILLISECS_PER_JIFFY);
220 }
221}
222EXPORT_SYMBOL(rpc_print_iostats);
223
109/* 224/*
110 * Register/unregister RPC proc files 225 * Register/unregister RPC proc files
111 */ 226 */
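
The per-operation accounting added here is keyed by the .p_statidx values introduced in the procedure tables (see the pmap_procedures hunks above): rpc_alloc_iostats() allocates cl_maxproc slots, and rpc_count_iostats() picks one per completed call. A minimal sketch of that lookup, assuming the task already carries a valid rpc_proc pointer:

static struct rpc_iostats *op_slot(struct rpc_task *task)
{
	struct rpc_iostats *stats = task->tk_client->cl_metrics;

	if (stats == NULL)
		return NULL;
	/* one rpc_iostats slot per procedure, selected by p_statidx */
	return &stats[task->tk_msg.rpc_proc->p_statidx];
}
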
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 8ff2c8acb223..4dd5b3cfe754 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -44,13 +44,13 @@
44#include <linux/random.h> 44#include <linux/random.h>
45 45
46#include <linux/sunrpc/clnt.h> 46#include <linux/sunrpc/clnt.h>
47#include <linux/sunrpc/metrics.h>
47 48
48/* 49/*
49 * Local variables 50 * Local variables
50 */ 51 */
51 52
52#ifdef RPC_DEBUG 53#ifdef RPC_DEBUG
53# undef RPC_DEBUG_DATA
54# define RPCDBG_FACILITY RPCDBG_XPRT 54# define RPCDBG_FACILITY RPCDBG_XPRT
55#endif 55#endif
56 56
@@ -548,6 +548,7 @@ void xprt_connect(struct rpc_task *task)
548 548
549 task->tk_timeout = xprt->connect_timeout; 549 task->tk_timeout = xprt->connect_timeout;
550 rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); 550 rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
551 xprt->stat.connect_start = jiffies;
551 xprt->ops->connect(task); 552 xprt->ops->connect(task);
552 } 553 }
553 return; 554 return;
@@ -558,6 +559,8 @@ static void xprt_connect_status(struct rpc_task *task)
558 struct rpc_xprt *xprt = task->tk_xprt; 559 struct rpc_xprt *xprt = task->tk_xprt;
559 560
560 if (task->tk_status >= 0) { 561 if (task->tk_status >= 0) {
562 xprt->stat.connect_count++;
563 xprt->stat.connect_time += (long)jiffies - xprt->stat.connect_start;
561 dprintk("RPC: %4d xprt_connect_status: connection established\n", 564 dprintk("RPC: %4d xprt_connect_status: connection established\n",
562 task->tk_pid); 565 task->tk_pid);
563 return; 566 return;
@@ -601,16 +604,14 @@ static void xprt_connect_status(struct rpc_task *task)
601struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid) 604struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, u32 xid)
602{ 605{
603 struct list_head *pos; 606 struct list_head *pos;
604 struct rpc_rqst *req = NULL;
605 607
606 list_for_each(pos, &xprt->recv) { 608 list_for_each(pos, &xprt->recv) {
607 struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list); 609 struct rpc_rqst *entry = list_entry(pos, struct rpc_rqst, rq_list);
608 if (entry->rq_xid == xid) { 610 if (entry->rq_xid == xid)
609 req = entry; 611 return entry;
610 break;
611 }
612 } 612 }
613 return req; 613 xprt->stat.bad_xids++;
614 return NULL;
614} 615}
615 616
616/** 617/**
@@ -646,7 +647,12 @@ void xprt_complete_rqst(struct rpc_task *task, int copied)
646 dprintk("RPC: %5u xid %08x complete (%d bytes received)\n", 647 dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
647 task->tk_pid, ntohl(req->rq_xid), copied); 648 task->tk_pid, ntohl(req->rq_xid), copied);
648 649
650 task->tk_xprt->stat.recvs++;
651 task->tk_rtt = (long)jiffies - req->rq_xtime;
652
649 list_del_init(&req->rq_list); 653 list_del_init(&req->rq_list);
654 /* Ensure all writes are done before we update req->rq_received */
655 smp_wmb();
650 req->rq_received = req->rq_private_buf.len = copied; 656 req->rq_received = req->rq_private_buf.len = copied;
651 rpc_wake_up_task(task); 657 rpc_wake_up_task(task);
652} 658}
@@ -723,7 +729,6 @@ void xprt_transmit(struct rpc_task *task)
723 729
724 dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); 730 dprintk("RPC: %4d xprt_transmit(%u)\n", task->tk_pid, req->rq_slen);
725 731
726 smp_rmb();
727 if (!req->rq_received) { 732 if (!req->rq_received) {
728 if (list_empty(&req->rq_list)) { 733 if (list_empty(&req->rq_list)) {
729 spin_lock_bh(&xprt->transport_lock); 734 spin_lock_bh(&xprt->transport_lock);
@@ -744,12 +749,19 @@ void xprt_transmit(struct rpc_task *task)
744 if (status == 0) { 749 if (status == 0) {
745 dprintk("RPC: %4d xmit complete\n", task->tk_pid); 750 dprintk("RPC: %4d xmit complete\n", task->tk_pid);
746 spin_lock_bh(&xprt->transport_lock); 751 spin_lock_bh(&xprt->transport_lock);
752
747 xprt->ops->set_retrans_timeout(task); 753 xprt->ops->set_retrans_timeout(task);
754
755 xprt->stat.sends++;
756 xprt->stat.req_u += xprt->stat.sends - xprt->stat.recvs;
757 xprt->stat.bklog_u += xprt->backlog.qlen;
758
748 /* Don't race with disconnect */ 759 /* Don't race with disconnect */
749 if (!xprt_connected(xprt)) 760 if (!xprt_connected(xprt))
750 task->tk_status = -ENOTCONN; 761 task->tk_status = -ENOTCONN;
751 else if (!req->rq_received) 762 else if (!req->rq_received)
752 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); 763 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
764
753 xprt->ops->release_xprt(xprt, task); 765 xprt->ops->release_xprt(xprt, task);
754 spin_unlock_bh(&xprt->transport_lock); 766 spin_unlock_bh(&xprt->transport_lock);
755 return; 767 return;
@@ -848,6 +860,7 @@ void xprt_release(struct rpc_task *task)
848 860
849 if (!(req = task->tk_rqstp)) 861 if (!(req = task->tk_rqstp))
850 return; 862 return;
863 rpc_count_iostats(task);
851 spin_lock_bh(&xprt->transport_lock); 864 spin_lock_bh(&xprt->transport_lock);
852 xprt->ops->release_xprt(xprt, task); 865 xprt->ops->release_xprt(xprt, task);
853 if (xprt->ops->release_request) 866 if (xprt->ops->release_request)
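
Note the barrier change in xprt_complete_rqst(): the new smp_wmb() orders the reply-buffer updates ahead of the store to rq_received (per the added comment), and the read-side smp_rmb() in xprt_transmit() is dropped in the same change. A sketch of the publish step (illustrative, mirroring the hunk above):

	/* all updates to the reply data land first ... */
	smp_wmb();
	/* ... then rq_received announces how much was copied */
	req->rq_received = req->rq_private_buf.len = copied;
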
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index c458f8d1d6d1..4b4e7dfdff14 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -382,6 +382,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
382 /* If we've sent the entire packet, immediately 382 /* If we've sent the entire packet, immediately
383 * reset the count of bytes sent. */ 383 * reset the count of bytes sent. */
384 req->rq_bytes_sent += status; 384 req->rq_bytes_sent += status;
385 task->tk_bytes_sent += status;
385 if (likely(req->rq_bytes_sent >= req->rq_slen)) { 386 if (likely(req->rq_bytes_sent >= req->rq_slen)) {
386 req->rq_bytes_sent = 0; 387 req->rq_bytes_sent = 0;
387 return 0; 388 return 0;
@@ -1114,6 +1115,8 @@ static void xs_tcp_connect_worker(void *args)
1114 } 1115 }
1115 1116
1116 /* Tell the socket layer to start connecting... */ 1117 /* Tell the socket layer to start connecting... */
1118 xprt->stat.connect_count++;
1119 xprt->stat.connect_start = jiffies;
1117 status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr, 1120 status = sock->ops->connect(sock, (struct sockaddr *) &xprt->addr,
1118 sizeof(xprt->addr), O_NONBLOCK); 1121 sizeof(xprt->addr), O_NONBLOCK);
1119 dprintk("RPC: %p connect status %d connected %d sock state %d\n", 1122 dprintk("RPC: %p connect status %d connected %d sock state %d\n",
@@ -1177,6 +1180,50 @@ static void xs_connect(struct rpc_task *task)
1177 } 1180 }
1178} 1181}
1179 1182
1183/**
1184 * xs_udp_print_stats - display UDP socket-specific stats
1185 * @xprt: rpc_xprt struct containing statistics
1186 * @seq: output file
1187 *
1188 */
1189static void xs_udp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
1190{
1191 seq_printf(seq, "\txprt:\tudp %u %lu %lu %lu %lu %Lu %Lu\n",
1192 xprt->port,
1193 xprt->stat.bind_count,
1194 xprt->stat.sends,
1195 xprt->stat.recvs,
1196 xprt->stat.bad_xids,
1197 xprt->stat.req_u,
1198 xprt->stat.bklog_u);
1199}
1200
1201/**
1202 * xs_tcp_print_stats - display TCP socket-specific stats
1203 * @xprt: rpc_xprt struct containing statistics
1204 * @seq: output file
1205 *
1206 */
1207static void xs_tcp_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
1208{
1209 long idle_time = 0;
1210
1211 if (xprt_connected(xprt))
1212 idle_time = (long)(jiffies - xprt->last_used) / HZ;
1213
1214 seq_printf(seq, "\txprt:\ttcp %u %lu %lu %lu %ld %lu %lu %lu %Lu %Lu\n",
1215 xprt->port,
1216 xprt->stat.bind_count,
1217 xprt->stat.connect_count,
1218 xprt->stat.connect_time,
1219 idle_time,
1220 xprt->stat.sends,
1221 xprt->stat.recvs,
1222 xprt->stat.bad_xids,
1223 xprt->stat.req_u,
1224 xprt->stat.bklog_u);
1225}
1226
1180static struct rpc_xprt_ops xs_udp_ops = { 1227static struct rpc_xprt_ops xs_udp_ops = {
1181 .set_buffer_size = xs_udp_set_buffer_size, 1228 .set_buffer_size = xs_udp_set_buffer_size,
1182 .reserve_xprt = xprt_reserve_xprt_cong, 1229 .reserve_xprt = xprt_reserve_xprt_cong,
@@ -1191,6 +1238,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
1191 .release_request = xprt_release_rqst_cong, 1238 .release_request = xprt_release_rqst_cong,
1192 .close = xs_close, 1239 .close = xs_close,
1193 .destroy = xs_destroy, 1240 .destroy = xs_destroy,
1241 .print_stats = xs_udp_print_stats,
1194}; 1242};
1195 1243
1196static struct rpc_xprt_ops xs_tcp_ops = { 1244static struct rpc_xprt_ops xs_tcp_ops = {
@@ -1204,6 +1252,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
1204 .set_retrans_timeout = xprt_set_retrans_timeout_def, 1252 .set_retrans_timeout = xprt_set_retrans_timeout_def,
1205 .close = xs_close, 1253 .close = xs_close,
1206 .destroy = xs_destroy, 1254 .destroy = xs_destroy,
1255 .print_stats = xs_tcp_print_stats,
1207}; 1256};
1208 1257
1209/** 1258/**
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 910b37e5083d..784b24b6d102 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1629,7 +1629,7 @@ void tipc_link_retransmit(struct link *l_ptr, struct sk_buff *buf,
1629 tipc_msg_print(TIPC_CONS, buf_msg(buf), ">RETR>"); 1629 tipc_msg_print(TIPC_CONS, buf_msg(buf), ">RETR>");
1630 info("...Retransmitted %u times\n", 1630 info("...Retransmitted %u times\n",
1631 l_ptr->stale_count); 1631 l_ptr->stale_count);
1632 link_print(l_ptr, TIPC_CONS, "Resetting Link\n");; 1632 link_print(l_ptr, TIPC_CONS, "Resetting Link\n");
1633 tipc_link_reset(l_ptr); 1633 tipc_link_reset(l_ptr);
1634 break; 1634 break;
1635 } 1635 }
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 2b4cc2eea5b3..d901465ce013 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1878,6 +1878,8 @@ static unsigned int unix_poll(struct file * file, struct socket *sock, poll_tabl
1878 mask |= POLLERR; 1878 mask |= POLLERR;
1879 if (sk->sk_shutdown == SHUTDOWN_MASK) 1879 if (sk->sk_shutdown == SHUTDOWN_MASK)
1880 mask |= POLLHUP; 1880 mask |= POLLHUP;
1881 if (sk->sk_shutdown & RCV_SHUTDOWN)
1882 mask |= POLLRDHUP;
1881 1883
1882 /* readable? */ 1884 /* readable? */
1883 if (!skb_queue_empty(&sk->sk_receive_queue) || 1885 if (!skb_queue_empty(&sk->sk_receive_queue) ||