Diffstat (limited to 'net')
-rw-r--r--  net/can/raw.c | 3
-rw-r--r--  net/core/ethtool.c | 5
-rw-r--r--  net/core/net-sysfs.c | 2
-rw-r--r--  net/core/rtnetlink.c | 6
-rw-r--r--  net/dccp/probe.c | 19
-rw-r--r--  net/ipv4/fib_hash.c | 17
-rw-r--r--  net/ipv4/fib_trie.c | 18
-rw-r--r--  net/ipv4/icmp.c | 10
-rw-r--r--  net/ipv4/ip_options.c | 12
-rw-r--r--  net/ipv4/route.c | 11
-rw-r--r--  net/ipv4/tcp.c | 2
-rw-r--r--  net/ipv4/tcp_input.c | 2
-rw-r--r--  net/ipv4/tcp_ipv4.c | 36
-rw-r--r--  net/ipv4/tcp_probe.c | 9
-rw-r--r--  net/ipv4/udp.c | 15
-rw-r--r--  net/ipv6/Kconfig | 2
-rw-r--r--  net/ipv6/addrconf.c | 7
-rw-r--r--  net/ipv6/ip6_fib.c | 2
-rw-r--r--  net/ipv6/raw.c | 18
-rw-r--r--  net/ipv6/route.c | 5
-rw-r--r--  net/key/af_key.c | 7
-rw-r--r--  net/mac80211/mesh.h | 1
-rw-r--r--  net/mac80211/mesh_hwmp.c | 1
-rw-r--r--  net/mac80211/mlme.c | 28
-rw-r--r--  net/mac80211/rx.c | 2
-rw-r--r--  net/mac80211/wme.c | 2
-rw-r--r--  net/rose/rose_route.c | 2
-rw-r--r--  net/rxrpc/rxkad.c | 2
-rw-r--r--  net/sctp/objcnt.c | 9
-rw-r--r--  net/socket.c | 3
-rw-r--r--  net/sunrpc/Makefile | 2
-rw-r--r--  net/sunrpc/auth.c | 71
-rw-r--r--  net/sunrpc/auth_generic.c | 177
-rw-r--r--  net/sunrpc/auth_gss/auth_gss.c | 105
-rw-r--r--  net/sunrpc/auth_gss/gss_generic_token.c | 4
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_crypto.c | 6
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seal.c | 9
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_seqnum.c | 4
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_unseal.c | 2
-rw-r--r--  net/sunrpc/auth_gss/gss_krb5_wrap.c | 8
-rw-r--r--  net/sunrpc/auth_gss/gss_spkm3_seal.c | 4
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c | 9
-rw-r--r--  net/sunrpc/auth_null.c | 3
-rw-r--r--  net/sunrpc/auth_unix.c | 59
-rw-r--r--  net/sunrpc/cache.c | 1
-rw-r--r--  net/sunrpc/clnt.c | 50
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 2
-rw-r--r--  net/sunrpc/sched.c | 264
-rw-r--r--  net/sunrpc/svc.c | 25
-rw-r--r--  net/sunrpc/svc_xprt.c | 30
-rw-r--r--  net/sunrpc/svcauth_unix.c | 118
-rw-r--r--  net/sunrpc/svcsock.c | 29
-rw-r--r--  net/sunrpc/xprt.c | 80
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c | 2
-rw-r--r--  net/sunrpc/xprtsock.c | 82
-rw-r--r--  net/tipc/socket.c | 4
-rw-r--r--  net/unix/af_unix.c | 6
-rw-r--r--  net/xfrm/xfrm_policy.c | 2
-rw-r--r--  net/xfrm/xfrm_user.c | 2
59 files changed, 893 insertions, 525 deletions
diff --git a/net/can/raw.c b/net/can/raw.c
index ead50c7c0d40..201cbfc6b9ec 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -573,7 +573,8 @@ static int raw_getsockopt(struct socket *sock, int level, int optname,
573 int fsize = ro->count * sizeof(struct can_filter); 573 int fsize = ro->count * sizeof(struct can_filter);
574 if (len > fsize) 574 if (len > fsize)
575 len = fsize; 575 len = fsize;
576 err = copy_to_user(optval, ro->filter, len); 576 if (copy_to_user(optval, ro->filter, len))
577 err = -EFAULT;
577 } else 578 } else
578 len = 0; 579 len = 0;
579 release_sock(sk); 580 release_sock(sk);
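
Note: the hunk above is one of several in this series that stop storing copy_to_user()'s return value directly. copy_to_user() returns the number of bytes it failed to copy (0 on success), not an error code, so the idiomatic conversion maps any nonzero return to -EFAULT. A minimal kernel-style sketch of the idiom, using a hypothetical helper rather than the raw_getsockopt() body from this patch:

#include <linux/uaccess.h>
#include <linux/errno.h>

/* Hypothetical helper: copy an in-kernel filter table out to user space.
 * copy_to_user() returns the number of bytes left uncopied (0 on success),
 * so any nonzero result is translated to -EFAULT instead of being handed
 * back to the caller as a byte count. */
static int copy_filters_to_user(void __user *optval, const void *filter,
				size_t len)
{
	if (copy_to_user(optval, filter, len))
		return -EFAULT;
	return 0;
}
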
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index a29b43d0b450..0133b5ebd545 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -323,6 +323,11 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
323 bytes_remaining -= eeprom.len; 323 bytes_remaining -= eeprom.len;
324 } 324 }
325 325
326 eeprom.len = userbuf - (useraddr + sizeof(eeprom));
327 eeprom.offset -= eeprom.len;
328 if (copy_to_user(useraddr, &eeprom, sizeof(eeprom)))
329 ret = -EFAULT;
330
326 kfree(data); 331 kfree(data);
327 return ret; 332 return ret;
328} 333}
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index 7635d3f72723..4e7b847347f7 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -87,6 +87,7 @@ static ssize_t netdev_store(struct device *dev, struct device_attribute *attr,
87 return ret; 87 return ret;
88} 88}
89 89
90NETDEVICE_SHOW(dev_id, fmt_hex);
90NETDEVICE_SHOW(addr_len, fmt_dec); 91NETDEVICE_SHOW(addr_len, fmt_dec);
91NETDEVICE_SHOW(iflink, fmt_dec); 92NETDEVICE_SHOW(iflink, fmt_dec);
92NETDEVICE_SHOW(ifindex, fmt_dec); 93NETDEVICE_SHOW(ifindex, fmt_dec);
@@ -210,6 +211,7 @@ static ssize_t store_tx_queue_len(struct device *dev,
210 211
211static struct device_attribute net_class_attributes[] = { 212static struct device_attribute net_class_attributes[] = {
212 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL), 213 __ATTR(addr_len, S_IRUGO, show_addr_len, NULL),
214 __ATTR(dev_id, S_IRUGO, show_dev_id, NULL),
213 __ATTR(iflink, S_IRUGO, show_iflink, NULL), 215 __ATTR(iflink, S_IRUGO, show_iflink, NULL),
214 __ATTR(ifindex, S_IRUGO, show_ifindex, NULL), 216 __ATTR(ifindex, S_IRUGO, show_ifindex, NULL),
215 __ATTR(features, S_IRUGO, show_features, NULL), 217 __ATTR(features, S_IRUGO, show_features, NULL),
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index bc39e417694a..cf857c4dc7b1 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -82,6 +82,11 @@ int rtnl_trylock(void)
82 return mutex_trylock(&rtnl_mutex); 82 return mutex_trylock(&rtnl_mutex);
83} 83}
84 84
85int rtnl_is_locked(void)
86{
87 return mutex_is_locked(&rtnl_mutex);
88}
89
85static struct rtnl_link *rtnl_msg_handlers[NPROTO]; 90static struct rtnl_link *rtnl_msg_handlers[NPROTO];
86 91
87static inline int rtm_msgindex(int msgtype) 92static inline int rtm_msgindex(int msgtype)
@@ -1402,6 +1407,7 @@ EXPORT_SYMBOL(rtnetlink_put_metrics);
1402EXPORT_SYMBOL(rtnl_lock); 1407EXPORT_SYMBOL(rtnl_lock);
1403EXPORT_SYMBOL(rtnl_trylock); 1408EXPORT_SYMBOL(rtnl_trylock);
1404EXPORT_SYMBOL(rtnl_unlock); 1409EXPORT_SYMBOL(rtnl_unlock);
1410EXPORT_SYMBOL(rtnl_is_locked);
1405EXPORT_SYMBOL(rtnl_unicast); 1411EXPORT_SYMBOL(rtnl_unicast);
1406EXPORT_SYMBOL(rtnl_notify); 1412EXPORT_SYMBOL(rtnl_notify);
1407EXPORT_SYMBOL(rtnl_set_sk_err); 1413EXPORT_SYMBOL(rtnl_set_sk_err);
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 7053bb827bc8..0bcdc9250279 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -46,29 +46,24 @@ struct {
46 struct kfifo *fifo; 46 struct kfifo *fifo;
47 spinlock_t lock; 47 spinlock_t lock;
48 wait_queue_head_t wait; 48 wait_queue_head_t wait;
49 struct timeval tstart; 49 struct timespec tstart;
50} dccpw; 50} dccpw;
51 51
52static void printl(const char *fmt, ...) 52static void printl(const char *fmt, ...)
53{ 53{
54 va_list args; 54 va_list args;
55 int len; 55 int len;
56 struct timeval now; 56 struct timespec now;
57 char tbuf[256]; 57 char tbuf[256];
58 58
59 va_start(args, fmt); 59 va_start(args, fmt);
60 do_gettimeofday(&now); 60 getnstimeofday(&now);
61 61
62 now.tv_sec -= dccpw.tstart.tv_sec; 62 now = timespec_sub(now, dccpw.tstart);
63 now.tv_usec -= dccpw.tstart.tv_usec;
64 if (now.tv_usec < 0) {
65 --now.tv_sec;
66 now.tv_usec += 1000000;
67 }
68 63
69 len = sprintf(tbuf, "%lu.%06lu ", 64 len = sprintf(tbuf, "%lu.%06lu ",
70 (unsigned long) now.tv_sec, 65 (unsigned long) now.tv_sec,
71 (unsigned long) now.tv_usec); 66 (unsigned long) now.tv_nsec / NSEC_PER_USEC);
72 len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args); 67 len += vscnprintf(tbuf+len, sizeof(tbuf)-len, fmt, args);
73 va_end(args); 68 va_end(args);
74 69
@@ -119,7 +114,7 @@ static struct jprobe dccp_send_probe = {
119static int dccpprobe_open(struct inode *inode, struct file *file) 114static int dccpprobe_open(struct inode *inode, struct file *file)
120{ 115{
121 kfifo_reset(dccpw.fifo); 116 kfifo_reset(dccpw.fifo);
122 do_gettimeofday(&dccpw.tstart); 117 getnstimeofday(&dccpw.tstart);
123 return 0; 118 return 0;
124} 119}
125 120
@@ -145,7 +140,7 @@ static ssize_t dccpprobe_read(struct file *file, char __user *buf,
145 goto out_free; 140 goto out_free;
146 141
147 cnt = kfifo_get(dccpw.fifo, tbuf, len); 142 cnt = kfifo_get(dccpw.fifo, tbuf, len);
148 error = copy_to_user(buf, tbuf, cnt); 143 error = copy_to_user(buf, tbuf, cnt) ? -EFAULT : 0;
149 144
150out_free: 145out_free:
151 vfree(tbuf); 146 vfree(tbuf);
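
Note: the dccp probe conversion above replaces struct timeval/do_gettimeofday() with struct timespec/getnstimeofday() and lets timespec_sub() handle the borrow the old code fixed up by hand. A runnable user-space sketch of the same elapsed-time formatting, using clock_gettime() in place of the kernel interfaces:

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec start, now, delta;

	clock_gettime(CLOCK_MONOTONIC, &start);
	/* ... the interval being measured would elapse here ... */
	clock_gettime(CLOCK_MONOTONIC, &now);

	/* Equivalent of the kernel's timespec_sub(): subtract the fields and
	 * borrow one second if the nanosecond part goes negative. */
	delta.tv_sec = now.tv_sec - start.tv_sec;
	delta.tv_nsec = now.tv_nsec - start.tv_nsec;
	if (delta.tv_nsec < 0) {
		delta.tv_sec--;
		delta.tv_nsec += 1000000000L;
	}

	/* Same "seconds.microseconds" layout as printl() in the patch. */
	printf("%lu.%06lu\n", (unsigned long)delta.tv_sec,
	       (unsigned long)(delta.tv_nsec / 1000));
	return 0;
}
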
diff --git a/net/ipv4/fib_hash.c b/net/ipv4/fib_hash.c
index 02088deb0461..2e2fc3376ac9 100644
--- a/net/ipv4/fib_hash.c
+++ b/net/ipv4/fib_hash.c
@@ -1003,7 +1003,7 @@ static unsigned fib_flag_trans(int type, __be32 mask, struct fib_info *fi)
1003static int fib_seq_show(struct seq_file *seq, void *v) 1003static int fib_seq_show(struct seq_file *seq, void *v)
1004{ 1004{
1005 struct fib_iter_state *iter; 1005 struct fib_iter_state *iter;
1006 char bf[128]; 1006 int len;
1007 __be32 prefix, mask; 1007 __be32 prefix, mask;
1008 unsigned flags; 1008 unsigned flags;
1009 struct fib_node *f; 1009 struct fib_node *f;
@@ -1025,18 +1025,19 @@ static int fib_seq_show(struct seq_file *seq, void *v)
1025 mask = FZ_MASK(iter->zone); 1025 mask = FZ_MASK(iter->zone);
1026 flags = fib_flag_trans(fa->fa_type, mask, fi); 1026 flags = fib_flag_trans(fa->fa_type, mask, fi);
1027 if (fi) 1027 if (fi)
1028 snprintf(bf, sizeof(bf), 1028 seq_printf(seq,
1029 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u", 1029 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
1030 fi->fib_dev ? fi->fib_dev->name : "*", prefix, 1030 fi->fib_dev ? fi->fib_dev->name : "*", prefix,
1031 fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority, 1031 fi->fib_nh->nh_gw, flags, 0, 0, fi->fib_priority,
1032 mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0), 1032 mask, (fi->fib_advmss ? fi->fib_advmss + 40 : 0),
1033 fi->fib_window, 1033 fi->fib_window,
1034 fi->fib_rtt >> 3); 1034 fi->fib_rtt >> 3, &len);
1035 else 1035 else
1036 snprintf(bf, sizeof(bf), 1036 seq_printf(seq,
1037 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u", 1037 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u%n",
1038 prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0); 1038 prefix, 0, flags, 0, 0, 0, mask, 0, 0, 0, &len);
1039 seq_printf(seq, "%-127s\n", bf); 1039
1040 seq_printf(seq, "%*s\n", 127 - len, "");
1040out: 1041out:
1041 return 0; 1042 return 0;
1042} 1043}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index ea294fffb9ce..4b02d14e7ab9 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -2602,15 +2602,16 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
2602 list_for_each_entry_rcu(fa, &li->falh, fa_list) { 2602 list_for_each_entry_rcu(fa, &li->falh, fa_list) {
2603 const struct fib_info *fi = fa->fa_info; 2603 const struct fib_info *fi = fa->fa_info;
2604 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi); 2604 unsigned flags = fib_flag_trans(fa->fa_type, mask, fi);
2605 char bf[128]; 2605 int len;
2606 2606
2607 if (fa->fa_type == RTN_BROADCAST 2607 if (fa->fa_type == RTN_BROADCAST
2608 || fa->fa_type == RTN_MULTICAST) 2608 || fa->fa_type == RTN_MULTICAST)
2609 continue; 2609 continue;
2610 2610
2611 if (fi) 2611 if (fi)
2612 snprintf(bf, sizeof(bf), 2612 seq_printf(seq,
2613 "%s\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u", 2613 "%s\t%08X\t%08X\t%04X\t%d\t%u\t"
2614 "%d\t%08X\t%d\t%u\t%u%n",
2614 fi->fib_dev ? fi->fib_dev->name : "*", 2615 fi->fib_dev ? fi->fib_dev->name : "*",
2615 prefix, 2616 prefix,
2616 fi->fib_nh->nh_gw, flags, 0, 0, 2617 fi->fib_nh->nh_gw, flags, 0, 0,
@@ -2619,14 +2620,15 @@ static int fib_route_seq_show(struct seq_file *seq, void *v)
2619 (fi->fib_advmss ? 2620 (fi->fib_advmss ?
2620 fi->fib_advmss + 40 : 0), 2621 fi->fib_advmss + 40 : 0),
2621 fi->fib_window, 2622 fi->fib_window,
2622 fi->fib_rtt >> 3); 2623 fi->fib_rtt >> 3, &len);
2623 else 2624 else
2624 snprintf(bf, sizeof(bf), 2625 seq_printf(seq,
2625 "*\t%08X\t%08X\t%04X\t%d\t%u\t%d\t%08X\t%d\t%u\t%u", 2626 "*\t%08X\t%08X\t%04X\t%d\t%u\t"
2627 "%d\t%08X\t%d\t%u\t%u%n",
2626 prefix, 0, flags, 0, 0, 0, 2628 prefix, 0, flags, 0, 0, 0,
2627 mask, 0, 0, 0); 2629 mask, 0, 0, 0, &len);
2628 2630
2629 seq_printf(seq, "%-127s\n", bf); 2631 seq_printf(seq, "%*s\n", 127 - len, "");
2630 } 2632 }
2631 } 2633 }
2632 2634
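
Note: the fib_hash.c and fib_trie.c hunks above drop a fixed 128-byte stack buffer and print straight into the seq_file, using the "%n" conversion to record how many characters were emitted and then padding the record to 127 columns with "%*s". A small runnable user-space illustration of that length-capture-and-pad pattern (ordinary printf() here, not the kernel's seq_printf()):

#include <stdio.h>

int main(void)
{
	int len = 0;

	/* "%n" stores the number of characters written so far into len. */
	printf("eth0\t%08X\t%08X%n", 0xC0A80001u, 0xC0A800FEu, &len);

	/* Pad the record out to 127 columns, mirroring the "%*s" format
	 * the patch uses after each seq_printf(). */
	printf("%*s\n", 127 - len, "");
	return 0;
}
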
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index f064031f2031..c67d00e8c600 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -847,7 +847,7 @@ static void icmp_echo(struct sk_buff *skb)
847 */ 847 */
848static void icmp_timestamp(struct sk_buff *skb) 848static void icmp_timestamp(struct sk_buff *skb)
849{ 849{
850 struct timeval tv; 850 struct timespec tv;
851 struct icmp_bxm icmp_param; 851 struct icmp_bxm icmp_param;
852 /* 852 /*
853 * Too short. 853 * Too short.
@@ -858,9 +858,9 @@ static void icmp_timestamp(struct sk_buff *skb)
858 /* 858 /*
859 * Fill in the current time as ms since midnight UT: 859 * Fill in the current time as ms since midnight UT:
860 */ 860 */
861 do_gettimeofday(&tv); 861 getnstimeofday(&tv);
862 icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * 1000 + 862 icmp_param.data.times[1] = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC +
863 tv.tv_usec / 1000); 863 tv.tv_nsec / NSEC_PER_MSEC);
864 icmp_param.data.times[2] = icmp_param.data.times[1]; 864 icmp_param.data.times[2] = icmp_param.data.times[1];
865 if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4)) 865 if (skb_copy_bits(skb, 0, &icmp_param.data.times[0], 4))
866 BUG(); 866 BUG();
@@ -1144,7 +1144,7 @@ static void __net_exit icmp_sk_exit(struct net *net)
1144 net->ipv4.icmp_sk = NULL; 1144 net->ipv4.icmp_sk = NULL;
1145} 1145}
1146 1146
1147int __net_init icmp_sk_init(struct net *net) 1147static int __net_init icmp_sk_init(struct net *net)
1148{ 1148{
1149 int i, err; 1149 int i, err;
1150 1150
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index d107543d3f81..33126ad2cfdc 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -55,10 +55,10 @@ void ip_options_build(struct sk_buff * skb, struct ip_options * opt,
55 if (opt->ts_needaddr) 55 if (opt->ts_needaddr)
56 ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt); 56 ip_rt_get_source(iph+opt->ts+iph[opt->ts+2]-9, rt);
57 if (opt->ts_needtime) { 57 if (opt->ts_needtime) {
58 struct timeval tv; 58 struct timespec tv;
59 __be32 midtime; 59 __be32 midtime;
60 do_gettimeofday(&tv); 60 getnstimeofday(&tv);
61 midtime = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000); 61 midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
62 memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4); 62 memcpy(iph+opt->ts+iph[opt->ts+2]-5, &midtime, 4);
63 } 63 }
64 return; 64 return;
@@ -406,10 +406,10 @@ int ip_options_compile(struct net *net,
406 break; 406 break;
407 } 407 }
408 if (timeptr) { 408 if (timeptr) {
409 struct timeval tv; 409 struct timespec tv;
410 __be32 midtime; 410 __be32 midtime;
411 do_gettimeofday(&tv); 411 getnstimeofday(&tv);
412 midtime = htonl((tv.tv_sec % 86400) * 1000 + tv.tv_usec / 1000); 412 midtime = htonl((tv.tv_sec % 86400) * MSEC_PER_SEC + tv.tv_nsec / NSEC_PER_MSEC);
413 memcpy(timeptr, &midtime, sizeof(__be32)); 413 memcpy(timeptr, &midtime, sizeof(__be32));
414 opt->is_changed = 1; 414 opt->is_changed = 1;
415 } 415 }
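
Note: the ICMP timestamp hunk and the two ip_options.c hunks above compute the same quantity, milliseconds since midnight UT, now taken from a struct timespec. A quick user-space check of that arithmetic (MSEC_PER_SEC and NSEC_PER_MSEC in the kernel are 1000 and 1000000):

#include <stdio.h>
#include <stdint.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	uint32_t midtime;

	clock_gettime(CLOCK_REALTIME, &ts);

	/* Seconds into the current UT day scaled to milliseconds, plus the
	 * millisecond part of the nanosecond field -- the value the patch
	 * passes to htonl() before storing it in the option. */
	midtime = (uint32_t)((ts.tv_sec % 86400) * 1000 + ts.tv_nsec / 1000000);
	printf("ms since midnight UT: %u\n", midtime);
	return 0;
}
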
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 780e9484c825..ce25a13f3430 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -367,10 +367,10 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
367 "HHUptod\tSpecDst"); 367 "HHUptod\tSpecDst");
368 else { 368 else {
369 struct rtable *r = v; 369 struct rtable *r = v;
370 char temp[256]; 370 int len;
371 371
372 sprintf(temp, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t" 372 seq_printf(seq, "%s\t%08lX\t%08lX\t%8X\t%d\t%u\t%d\t"
373 "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X", 373 "%08lX\t%d\t%u\t%u\t%02X\t%d\t%1d\t%08X%n",
374 r->u.dst.dev ? r->u.dst.dev->name : "*", 374 r->u.dst.dev ? r->u.dst.dev->name : "*",
375 (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway, 375 (unsigned long)r->rt_dst, (unsigned long)r->rt_gateway,
376 r->rt_flags, atomic_read(&r->u.dst.__refcnt), 376 r->rt_flags, atomic_read(&r->u.dst.__refcnt),
@@ -384,8 +384,9 @@ static int rt_cache_seq_show(struct seq_file *seq, void *v)
384 r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1, 384 r->u.dst.hh ? atomic_read(&r->u.dst.hh->hh_refcnt) : -1,
385 r->u.dst.hh ? (r->u.dst.hh->hh_output == 385 r->u.dst.hh ? (r->u.dst.hh->hh_output ==
386 dev_queue_xmit) : 0, 386 dev_queue_xmit) : 0,
387 r->rt_spec_dst); 387 r->rt_spec_dst, &len);
388 seq_printf(seq, "%-127s\n", temp); 388
389 seq_printf(seq, "%*s\n", 127 - len, "");
389 } 390 }
390 return 0; 391 return 0;
391} 392}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 58ac838bf460..f88653138621 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1722,7 +1722,7 @@ static int tcp_close_state(struct sock *sk)
1722 1722
1723/* 1723/*
1724 * Shutdown the sending side of a connection. Much like close except 1724 * Shutdown the sending side of a connection. Much like close except
1725 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD). 1725 * that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
1726 */ 1726 */
1727 1727
1728void tcp_shutdown(struct sock *sk, int how) 1728void tcp_shutdown(struct sock *sk, int how)
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index cdc051bfdb4d..ac9b8482f702 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2298,7 +2298,7 @@ static inline int tcp_packet_delayed(struct tcp_sock *tp)
2298{ 2298{
2299 return !tp->retrans_stamp || 2299 return !tp->retrans_stamp ||
2300 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr && 2300 (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
2301 (__s32)(tp->rx_opt.rcv_tsecr - tp->retrans_stamp) < 0); 2301 before(tp->rx_opt.rcv_tsecr, tp->retrans_stamp));
2302} 2302}
2303 2303
2304/* Undo procedures. */ 2304/* Undo procedures. */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 776615180b93..0e9bc120707d 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2255,13 +2255,13 @@ void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2255} 2255}
2256 2256
2257static void get_openreq4(struct sock *sk, struct request_sock *req, 2257static void get_openreq4(struct sock *sk, struct request_sock *req,
2258 char *tmpbuf, int i, int uid) 2258 struct seq_file *f, int i, int uid, int *len)
2259{ 2259{
2260 const struct inet_request_sock *ireq = inet_rsk(req); 2260 const struct inet_request_sock *ireq = inet_rsk(req);
2261 int ttd = req->expires - jiffies; 2261 int ttd = req->expires - jiffies;
2262 2262
2263 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" 2263 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2264 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p", 2264 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %p%n",
2265 i, 2265 i,
2266 ireq->loc_addr, 2266 ireq->loc_addr,
2267 ntohs(inet_sk(sk)->sport), 2267 ntohs(inet_sk(sk)->sport),
@@ -2276,10 +2276,11 @@ static void get_openreq4(struct sock *sk, struct request_sock *req,
2276 0, /* non standard timer */ 2276 0, /* non standard timer */
2277 0, /* open_requests have no inode */ 2277 0, /* open_requests have no inode */
2278 atomic_read(&sk->sk_refcnt), 2278 atomic_read(&sk->sk_refcnt),
2279 req); 2279 req,
2280 len);
2280} 2281}
2281 2282
2282static void get_tcp4_sock(struct sock *sk, char *tmpbuf, int i) 2283static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2283{ 2284{
2284 int timer_active; 2285 int timer_active;
2285 unsigned long timer_expires; 2286 unsigned long timer_expires;
@@ -2305,8 +2306,8 @@ static void get_tcp4_sock(struct sock *sk, char *tmpbuf, int i)
2305 timer_expires = jiffies; 2306 timer_expires = jiffies;
2306 } 2307 }
2307 2308
2308 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX " 2309 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2309 "%08X %5d %8d %lu %d %p %u %u %u %u %d", 2310 "%08X %5d %8d %lu %d %p %u %u %u %u %d%n",
2310 i, src, srcp, dest, destp, sk->sk_state, 2311 i, src, srcp, dest, destp, sk->sk_state,
2311 tp->write_seq - tp->snd_una, 2312 tp->write_seq - tp->snd_una,
2312 sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog : 2313 sk->sk_state == TCP_LISTEN ? sk->sk_ack_backlog :
@@ -2322,11 +2323,12 @@ static void get_tcp4_sock(struct sock *sk, char *tmpbuf, int i)
2322 icsk->icsk_ack.ato, 2323 icsk->icsk_ack.ato,
2323 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, 2324 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2324 tp->snd_cwnd, 2325 tp->snd_cwnd,
2325 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh); 2326 tp->snd_ssthresh >= 0xFFFF ? -1 : tp->snd_ssthresh,
2327 len);
2326} 2328}
2327 2329
2328static void get_timewait4_sock(struct inet_timewait_sock *tw, 2330static void get_timewait4_sock(struct inet_timewait_sock *tw,
2329 char *tmpbuf, int i) 2331 struct seq_file *f, int i, int *len)
2330{ 2332{
2331 __be32 dest, src; 2333 __be32 dest, src;
2332 __u16 destp, srcp; 2334 __u16 destp, srcp;
@@ -2340,11 +2342,11 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
2340 destp = ntohs(tw->tw_dport); 2342 destp = ntohs(tw->tw_dport);
2341 srcp = ntohs(tw->tw_sport); 2343 srcp = ntohs(tw->tw_sport);
2342 2344
2343 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" 2345 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2344 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p", 2346 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
2345 i, src, srcp, dest, destp, tw->tw_substate, 0, 0, 2347 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2346 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, 2348 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2347 atomic_read(&tw->tw_refcnt), tw); 2349 atomic_read(&tw->tw_refcnt), tw, len);
2348} 2350}
2349 2351
2350#define TMPSZ 150 2352#define TMPSZ 150
@@ -2352,7 +2354,7 @@ static void get_timewait4_sock(struct inet_timewait_sock *tw,
2352static int tcp4_seq_show(struct seq_file *seq, void *v) 2354static int tcp4_seq_show(struct seq_file *seq, void *v)
2353{ 2355{
2354 struct tcp_iter_state* st; 2356 struct tcp_iter_state* st;
2355 char tmpbuf[TMPSZ + 1]; 2357 int len;
2356 2358
2357 if (v == SEQ_START_TOKEN) { 2359 if (v == SEQ_START_TOKEN) {
2358 seq_printf(seq, "%-*s\n", TMPSZ - 1, 2360 seq_printf(seq, "%-*s\n", TMPSZ - 1,
@@ -2366,16 +2368,16 @@ static int tcp4_seq_show(struct seq_file *seq, void *v)
2366 switch (st->state) { 2368 switch (st->state) {
2367 case TCP_SEQ_STATE_LISTENING: 2369 case TCP_SEQ_STATE_LISTENING:
2368 case TCP_SEQ_STATE_ESTABLISHED: 2370 case TCP_SEQ_STATE_ESTABLISHED:
2369 get_tcp4_sock(v, tmpbuf, st->num); 2371 get_tcp4_sock(v, seq, st->num, &len);
2370 break; 2372 break;
2371 case TCP_SEQ_STATE_OPENREQ: 2373 case TCP_SEQ_STATE_OPENREQ:
2372 get_openreq4(st->syn_wait_sk, v, tmpbuf, st->num, st->uid); 2374 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2373 break; 2375 break;
2374 case TCP_SEQ_STATE_TIME_WAIT: 2376 case TCP_SEQ_STATE_TIME_WAIT:
2375 get_timewait4_sock(v, tmpbuf, st->num); 2377 get_timewait4_sock(v, seq, st->num, &len);
2376 break; 2378 break;
2377 } 2379 }
2378 seq_printf(seq, "%-*s\n", TMPSZ - 1, tmpbuf); 2380 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2379out: 2381out:
2380 return 0; 2382 return 0;
2381} 2383}
diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
index 1c509592574a..5ff0ce6e9d39 100644
--- a/net/ipv4/tcp_probe.c
+++ b/net/ipv4/tcp_probe.c
@@ -190,19 +190,18 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
190 190
191 width = tcpprobe_sprint(tbuf, sizeof(tbuf)); 191 width = tcpprobe_sprint(tbuf, sizeof(tbuf));
192 192
193 if (width < len) 193 if (cnt + width < len)
194 tcp_probe.tail = (tcp_probe.tail + 1) % bufsize; 194 tcp_probe.tail = (tcp_probe.tail + 1) % bufsize;
195 195
196 spin_unlock_bh(&tcp_probe.lock); 196 spin_unlock_bh(&tcp_probe.lock);
197 197
198 /* if record greater than space available 198 /* if record greater than space available
199 return partial buffer (so far) */ 199 return partial buffer (so far) */
200 if (width >= len) 200 if (cnt + width >= len)
201 break; 201 break;
202 202
203 error = copy_to_user(buf + cnt, tbuf, width); 203 if (copy_to_user(buf + cnt, tbuf, width))
204 if (error) 204 return -EFAULT;
205 break;
206 cnt += width; 205 cnt += width;
207 } 206 }
208 207
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index b053ac795275..1f535e315188 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1619,7 +1619,8 @@ void udp_proc_unregister(struct net *net, struct udp_seq_afinfo *afinfo)
1619} 1619}
1620 1620
1621/* ------------------------------------------------------------------------ */ 1621/* ------------------------------------------------------------------------ */
1622static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket) 1622static void udp4_format_sock(struct sock *sp, struct seq_file *f,
1623 int bucket, int *len)
1623{ 1624{
1624 struct inet_sock *inet = inet_sk(sp); 1625 struct inet_sock *inet = inet_sk(sp);
1625 __be32 dest = inet->daddr; 1626 __be32 dest = inet->daddr;
@@ -1627,13 +1628,13 @@ static void udp4_format_sock(struct sock *sp, char *tmpbuf, int bucket)
1627 __u16 destp = ntohs(inet->dport); 1628 __u16 destp = ntohs(inet->dport);
1628 __u16 srcp = ntohs(inet->sport); 1629 __u16 srcp = ntohs(inet->sport);
1629 1630
1630 sprintf(tmpbuf, "%4d: %08X:%04X %08X:%04X" 1631 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
1631 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p", 1632 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %p%n",
1632 bucket, src, srcp, dest, destp, sp->sk_state, 1633 bucket, src, srcp, dest, destp, sp->sk_state,
1633 atomic_read(&sp->sk_wmem_alloc), 1634 atomic_read(&sp->sk_wmem_alloc),
1634 atomic_read(&sp->sk_rmem_alloc), 1635 atomic_read(&sp->sk_rmem_alloc),
1635 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), 1636 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
1636 atomic_read(&sp->sk_refcnt), sp); 1637 atomic_read(&sp->sk_refcnt), sp, len);
1637} 1638}
1638 1639
1639int udp4_seq_show(struct seq_file *seq, void *v) 1640int udp4_seq_show(struct seq_file *seq, void *v)
@@ -1644,11 +1645,11 @@ int udp4_seq_show(struct seq_file *seq, void *v)
1644 "rx_queue tr tm->when retrnsmt uid timeout " 1645 "rx_queue tr tm->when retrnsmt uid timeout "
1645 "inode"); 1646 "inode");
1646 else { 1647 else {
1647 char tmpbuf[129];
1648 struct udp_iter_state *state = seq->private; 1648 struct udp_iter_state *state = seq->private;
1649 int len;
1649 1650
1650 udp4_format_sock(v, tmpbuf, state->bucket); 1651 udp4_format_sock(v, seq, state->bucket, &len);
1651 seq_printf(seq, "%-127s\n", tmpbuf); 1652 seq_printf(seq, "%*s\n", 127 - len ,"");
1652 } 1653 }
1653 return 0; 1654 return 0;
1654} 1655}
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 42814a2ec9d7..b2c9becc02e8 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -167,7 +167,7 @@ config IPV6_SIT
167 Tunneling means encapsulating data of one protocol type within 167 Tunneling means encapsulating data of one protocol type within
168 another protocol and sending it over a channel that understands the 168 another protocol and sending it over a channel that understands the
169 encapsulating protocol. This driver implements encapsulation of IPv6 169 encapsulating protocol. This driver implements encapsulation of IPv6
170 into IPv4 packets. This is useful if you want to connect two IPv6 170 into IPv4 packets. This is useful if you want to connect to IPv6
171 networks over an IPv4-only path. 171 networks over an IPv4-only path.
172 172
173 Saying M here will produce a module called sit.ko. If unsure, say Y. 173 Saying M here will produce a module called sit.ko. If unsure, say Y.
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 8a0fd4007bdb..e591e09e5e4e 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4338,12 +4338,6 @@ int unregister_inet6addr_notifier(struct notifier_block *nb)
4338 4338
4339EXPORT_SYMBOL(unregister_inet6addr_notifier); 4339EXPORT_SYMBOL(unregister_inet6addr_notifier);
4340 4340
4341
4342static int addrconf_net_init(struct net *net)
4343{
4344 return 0;
4345}
4346
4347static void addrconf_net_exit(struct net *net) 4341static void addrconf_net_exit(struct net *net)
4348{ 4342{
4349 struct net_device *dev; 4343 struct net_device *dev;
@@ -4360,7 +4354,6 @@ static void addrconf_net_exit(struct net *net)
4360} 4354}
4361 4355
4362static struct pernet_operations addrconf_net_ops = { 4356static struct pernet_operations addrconf_net_ops = {
4363 .init = addrconf_net_init,
4364 .exit = addrconf_net_exit, 4357 .exit = addrconf_net_exit,
4365}; 4358};
4366 4359
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 50f3f8f8a59b..1ee4fa17c129 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1543,7 +1543,7 @@ out_timer:
1543static void fib6_net_exit(struct net *net) 1543static void fib6_net_exit(struct net *net)
1544{ 1544{
1545 rt6_ifdown(net, NULL); 1545 rt6_ifdown(net, NULL);
1546 del_timer(net->ipv6.ip6_fib_timer); 1546 del_timer_sync(net->ipv6.ip6_fib_timer);
1547 kfree(net->ipv6.ip6_fib_timer); 1547 kfree(net->ipv6.ip6_fib_timer);
1548#ifdef CONFIG_IPV6_MULTIPLE_TABLES 1548#ifdef CONFIG_IPV6_MULTIPLE_TABLES
1549 kfree(net->ipv6.fib6_local_tbl); 1549 kfree(net->ipv6.fib6_local_tbl);
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 6193b124cbc7..396f0ea11090 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -971,6 +971,19 @@ static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
971 971
972 switch (optname) { 972 switch (optname) {
973 case IPV6_CHECKSUM: 973 case IPV6_CHECKSUM:
974 if (inet_sk(sk)->num == IPPROTO_ICMPV6 &&
975 level == IPPROTO_IPV6) {
976 /*
977 * RFC3542 tells that IPV6_CHECKSUM socket
978 * option in the IPPROTO_IPV6 level is not
979 * allowed on ICMPv6 sockets.
980 * If you want to set it, use IPPROTO_RAW
981 * level IPV6_CHECKSUM socket option
982 * (Linux extension).
983 */
984 return -EINVAL;
985 }
986
974 /* You may get strange result with a positive odd offset; 987 /* You may get strange result with a positive odd offset;
975 RFC2292bis agrees with me. */ 988 RFC2292bis agrees with me. */
976 if (val > 0 && (val&1)) 989 if (val > 0 && (val&1))
@@ -1046,6 +1059,11 @@ static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
1046 1059
1047 switch (optname) { 1060 switch (optname) {
1048 case IPV6_CHECKSUM: 1061 case IPV6_CHECKSUM:
1062 /*
1063 * We allow getsockopt() for IPPROTO_IPV6-level
1064 * IPV6_CHECKSUM socket option on ICMPv6 sockets
1065 * since RFC3542 is silent about it.
1066 */
1049 if (rp->checksum == 0) 1067 if (rp->checksum == 0)
1050 val = -1; 1068 val = -1;
1051 else 1069 else
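
Note: the comments added to net/ipv6/raw.c above encode the RFC 3542 rule: IPV6_CHECKSUM may not be set at the IPPROTO_IPV6 level on ICMPv6 sockets, while the Linux extension of setting it at the IPPROTO_RAW level still works, and getsockopt() remains allowed. A user-space illustration of the two setsockopt() calls, assuming the libc exposes IPV6_CHECKSUM via <netinet/in.h> as RFC 3542 specifies (raw sockets need CAP_NET_RAW):

#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int offset = 2;		/* checksum offset inside the ICMPv6 header */
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);

	if (fd < 0) {
		perror("socket (needs CAP_NET_RAW)");
		return 1;
	}

	/* After this patch the IPPROTO_IPV6-level request fails with EINVAL
	 * on ICMPv6 sockets, per RFC 3542. */
	if (setsockopt(fd, IPPROTO_IPV6, IPV6_CHECKSUM,
		       &offset, sizeof(offset)) < 0)
		printf("IPPROTO_IPV6 level: %s\n", strerror(errno));

	/* The Linux extension: the same option at the IPPROTO_RAW level. */
	if (setsockopt(fd, IPPROTO_RAW, IPV6_CHECKSUM,
		       &offset, sizeof(offset)) < 0)
		printf("IPPROTO_RAW level: %s\n", strerror(errno));

	return 0;
}
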
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 210a079cfc6f..a493ad9b8914 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -150,7 +150,7 @@ static struct rt6_info ip6_null_entry_template = {
150static int ip6_pkt_prohibit(struct sk_buff *skb); 150static int ip6_pkt_prohibit(struct sk_buff *skb);
151static int ip6_pkt_prohibit_out(struct sk_buff *skb); 151static int ip6_pkt_prohibit_out(struct sk_buff *skb);
152 152
153struct rt6_info ip6_prohibit_entry_template = { 153static struct rt6_info ip6_prohibit_entry_template = {
154 .u = { 154 .u = {
155 .dst = { 155 .dst = {
156 .__refcnt = ATOMIC_INIT(1), 156 .__refcnt = ATOMIC_INIT(1),
@@ -2614,9 +2614,8 @@ struct ctl_table *ipv6_route_sysctl_init(struct net *net)
2614 2614
2615static int ip6_route_net_init(struct net *net) 2615static int ip6_route_net_init(struct net *net)
2616{ 2616{
2617 int ret = 0; 2617 int ret = -ENOMEM;
2618 2618
2619 ret = -ENOMEM;
2620 net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template, 2619 net->ipv6.ip6_dst_ops = kmemdup(&ip6_dst_ops_template,
2621 sizeof(*net->ipv6.ip6_dst_ops), 2620 sizeof(*net->ipv6.ip6_dst_ops),
2622 GFP_KERNEL); 2621 GFP_KERNEL);
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1fb0fe42a72e..2403a31fe0f6 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -1907,7 +1907,7 @@ parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq)
1907 t->encap_family = xp->family; 1907 t->encap_family = xp->family;
1908 1908
1909 /* No way to set this via kame pfkey */ 1909 /* No way to set this via kame pfkey */
1910 t->aalgos = t->ealgos = t->calgos = ~0; 1910 t->allalgs = 1;
1911 xp->xfrm_nr++; 1911 xp->xfrm_nr++;
1912 return 0; 1912 return 0;
1913} 1913}
@@ -2356,7 +2356,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2356 struct xfrm_selector sel; 2356 struct xfrm_selector sel;
2357 struct km_event c; 2357 struct km_event c;
2358 struct sadb_x_sec_ctx *sec_ctx; 2358 struct sadb_x_sec_ctx *sec_ctx;
2359 struct xfrm_sec_ctx *pol_ctx; 2359 struct xfrm_sec_ctx *pol_ctx = NULL;
2360 2360
2361 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1], 2361 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC-1],
2362 ext_hdrs[SADB_EXT_ADDRESS_DST-1]) || 2362 ext_hdrs[SADB_EXT_ADDRESS_DST-1]) ||
@@ -2396,8 +2396,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, struct sadb_msg
2396 kfree(uctx); 2396 kfree(uctx);
2397 if (err) 2397 if (err)
2398 return err; 2398 return err;
2399 } else 2399 }
2400 pol_ctx = NULL;
2401 2400
2402 xp = xfrm_policy_bysel_ctx(XFRM_POLICY_TYPE_MAIN, 2401 xp = xfrm_policy_bysel_ctx(XFRM_POLICY_TYPE_MAIN,
2403 pol->sadb_x_policy_dir - 1, &sel, pol_ctx, 2402 pol->sadb_x_policy_dir - 1, &sel, pol_ctx,
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 742003d3a841..9ee3affab346 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -13,6 +13,7 @@
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/jhash.h> 15#include <linux/jhash.h>
16#include <asm/unaligned.h>
16#include "ieee80211_i.h" 17#include "ieee80211_i.h"
17 18
18 19
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index 02de8f1522a3..3df809222d1c 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -7,7 +7,6 @@
7 * published by the Free Software Foundation. 7 * published by the Free Software Foundation.
8 */ 8 */
9 9
10#include <asm/unaligned.h>
11#include "mesh.h" 10#include "mesh.h"
12 11
13#define TEST_FRAME_LEN 8192 12#define TEST_FRAME_LEN 8192
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 6b75cb6c6300..a5e5c31c23ab 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -2248,10 +2248,13 @@ static void ieee80211_rx_bss_put(struct net_device *dev,
2248 struct ieee80211_sta_bss *bss) 2248 struct ieee80211_sta_bss *bss)
2249{ 2249{
2250 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr); 2250 struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
2251 if (!atomic_dec_and_test(&bss->users)) 2251
2252 local_bh_disable();
2253 if (!atomic_dec_and_lock(&bss->users, &local->sta_bss_lock)) {
2254 local_bh_enable();
2252 return; 2255 return;
2256 }
2253 2257
2254 spin_lock_bh(&local->sta_bss_lock);
2255 __ieee80211_rx_bss_hash_del(dev, bss); 2258 __ieee80211_rx_bss_hash_del(dev, bss);
2256 list_del(&bss->list); 2259 list_del(&bss->list);
2257 spin_unlock_bh(&local->sta_bss_lock); 2260 spin_unlock_bh(&local->sta_bss_lock);
@@ -2709,7 +2712,26 @@ static void ieee80211_rx_bss_info(struct net_device *dev,
2709 bss->wmm_ie_len = elems.wmm_param_len + 2; 2712 bss->wmm_ie_len = elems.wmm_param_len + 2;
2710 } else 2713 } else
2711 bss->wmm_ie_len = 0; 2714 bss->wmm_ie_len = 0;
2712 } else if (!elems.wmm_param && bss->wmm_ie) { 2715 } else if (elems.wmm_info &&
2716 (!bss->wmm_ie || bss->wmm_ie_len != elems.wmm_info_len ||
2717 memcmp(bss->wmm_ie, elems.wmm_info, elems.wmm_info_len))) {
2718 /* As for certain AP's Fifth bit is not set in WMM IE in
2719 * beacon frames.So while parsing the beacon frame the
2720 * wmm_info structure is used instead of wmm_param.
2721 * wmm_info structure was never used to set bss->wmm_ie.
2722 * This code fixes this problem by copying the WME
2723 * information from wmm_info to bss->wmm_ie and enabling
2724 * n-band association.
2725 */
2726 kfree(bss->wmm_ie);
2727 bss->wmm_ie = kmalloc(elems.wmm_info_len + 2, GFP_ATOMIC);
2728 if (bss->wmm_ie) {
2729 memcpy(bss->wmm_ie, elems.wmm_info - 2,
2730 elems.wmm_info_len + 2);
2731 bss->wmm_ie_len = elems.wmm_info_len + 2;
2732 } else
2733 bss->wmm_ie_len = 0;
2734 } else if (!elems.wmm_param && !elems.wmm_info && bss->wmm_ie) {
2713 kfree(bss->wmm_ie); 2735 kfree(bss->wmm_ie);
2714 bss->wmm_ie = NULL; 2736 bss->wmm_ie = NULL;
2715 bss->wmm_ie_len = 0; 2737 bss->wmm_ie_len = 0;
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 52e4554fdde7..02f436a86061 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2170,7 +2170,7 @@ void __ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb,
2170 struct ieee80211_supported_band *sband; 2170 struct ieee80211_supported_band *sband;
2171 2171
2172 if (status->band < 0 || 2172 if (status->band < 0 ||
2173 status->band > IEEE80211_NUM_BANDS) { 2173 status->band >= IEEE80211_NUM_BANDS) {
2174 WARN_ON(1); 2174 WARN_ON(1);
2175 return; 2175 return;
2176 } 2176 }
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 4e94e4026e78..64faa3dc488f 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -709,7 +709,7 @@ void ieee80211_requeue(struct ieee80211_local *local, int queue)
709 struct ieee80211_sched_data *q = qdisc_priv(root_qd); 709 struct ieee80211_sched_data *q = qdisc_priv(root_qd);
710 struct Qdisc *qdisc = q->queues[queue]; 710 struct Qdisc *qdisc = q->queues[queue];
711 struct sk_buff *skb = NULL; 711 struct sk_buff *skb = NULL;
712 u32 len = qdisc->q.qlen; 712 u32 len;
713 713
714 if (!qdisc || !qdisc->dequeue) 714 if (!qdisc || !qdisc->dequeue)
715 return; 715 return;
diff --git a/net/rose/rose_route.c b/net/rose/rose_route.c
index fb9359fb2358..5053a53ba24f 100644
--- a/net/rose/rose_route.c
+++ b/net/rose/rose_route.c
@@ -857,7 +857,6 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
857 src_addr = (rose_address *)(skb->data + 9); 857 src_addr = (rose_address *)(skb->data + 9);
858 dest_addr = (rose_address *)(skb->data + 4); 858 dest_addr = (rose_address *)(skb->data + 4);
859 859
860 spin_lock_bh(&rose_node_list_lock);
861 spin_lock_bh(&rose_neigh_list_lock); 860 spin_lock_bh(&rose_neigh_list_lock);
862 spin_lock_bh(&rose_route_list_lock); 861 spin_lock_bh(&rose_route_list_lock);
863 862
@@ -1060,7 +1059,6 @@ int rose_route_frame(struct sk_buff *skb, ax25_cb *ax25)
1060out: 1059out:
1061 spin_unlock_bh(&rose_route_list_lock); 1060 spin_unlock_bh(&rose_route_list_lock);
1062 spin_unlock_bh(&rose_neigh_list_lock); 1061 spin_unlock_bh(&rose_neigh_list_lock);
1063 spin_unlock_bh(&rose_node_list_lock);
1064 1062
1065 return res; 1063 return res;
1066} 1064}
diff --git a/net/rxrpc/rxkad.c b/net/rxrpc/rxkad.c
index 6d38a81b336d..ba3f6e49fddc 100644
--- a/net/rxrpc/rxkad.c
+++ b/net/rxrpc/rxkad.c
@@ -493,8 +493,8 @@ static int rxkad_verify_packet(const struct rxrpc_call *call,
493 __be32 x[2]; 493 __be32 x[2];
494 } tmpbuf __attribute__((aligned(8))); /* must all be in same page */ 494 } tmpbuf __attribute__((aligned(8))); /* must all be in same page */
495 __be32 x; 495 __be32 x;
496 u16 y;
497 __be16 cksum; 496 __be16 cksum;
497 u32 y;
498 int ret; 498 int ret;
499 499
500 sp = rxrpc_skb(skb); 500 sp = rxrpc_skb(skb);
diff --git a/net/sctp/objcnt.c b/net/sctp/objcnt.c
index cfeb07ea1b04..f73ec0ea93ba 100644
--- a/net/sctp/objcnt.c
+++ b/net/sctp/objcnt.c
@@ -83,13 +83,12 @@ static sctp_dbg_objcnt_entry_t sctp_dbg_objcnt[] = {
83 */ 83 */
84static int sctp_objcnt_seq_show(struct seq_file *seq, void *v) 84static int sctp_objcnt_seq_show(struct seq_file *seq, void *v)
85{ 85{
86 int i; 86 int i, len;
87 char temp[128];
88 87
89 i = (int)*(loff_t *)v; 88 i = (int)*(loff_t *)v;
90 sprintf(temp, "%s: %d", sctp_dbg_objcnt[i].label, 89 seq_printf(seq, "%s: %d%n", sctp_dbg_objcnt[i].label,
91 atomic_read(sctp_dbg_objcnt[i].counter)); 90 atomic_read(sctp_dbg_objcnt[i].counter), &len);
92 seq_printf(seq, "%-127s\n", temp); 91 seq_printf(seq, "%*s\n", 127 - len, "");
93 return 0; 92 return 0;
94} 93}
95 94
diff --git a/net/socket.c b/net/socket.c
index 9b5c917f8a6b..66c4a8cf6db9 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2327,9 +2327,6 @@ int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how)
2327 return sock->ops->shutdown(sock, how); 2327 return sock->ops->shutdown(sock, how);
2328} 2328}
2329 2329
2330/* ABI emulation layers need these two */
2331EXPORT_SYMBOL(move_addr_to_kernel);
2332EXPORT_SYMBOL(move_addr_to_user);
2333EXPORT_SYMBOL(sock_create); 2330EXPORT_SYMBOL(sock_create);
2334EXPORT_SYMBOL(sock_create_kern); 2331EXPORT_SYMBOL(sock_create_kern);
2335EXPORT_SYMBOL(sock_create_lite); 2332EXPORT_SYMBOL(sock_create_lite);
diff --git a/net/sunrpc/Makefile b/net/sunrpc/Makefile
index 92e1dbe50947..5369aa369b35 100644
--- a/net/sunrpc/Makefile
+++ b/net/sunrpc/Makefile
@@ -8,7 +8,7 @@ obj-$(CONFIG_SUNRPC_GSS) += auth_gss/
8obj-$(CONFIG_SUNRPC_XPRT_RDMA) += xprtrdma/ 8obj-$(CONFIG_SUNRPC_XPRT_RDMA) += xprtrdma/
9 9
10sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \ 10sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
11 auth.o auth_null.o auth_unix.o \ 11 auth.o auth_null.o auth_unix.o auth_generic.o \
12 svc.o svcsock.o svcauth.o svcauth_unix.o \ 12 svc.o svcsock.o svcauth.o svcauth_unix.o \
13 rpcb_clnt.o timer.o xdr.o \ 13 rpcb_clnt.o timer.o xdr.o \
14 sunrpc_syms.o cache.o rpc_pipe.o \ 14 sunrpc_syms.o cache.o rpc_pipe.o \
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index eca941ce298b..6bfea9ed6869 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/slab.h> 12#include <linux/slab.h>
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/hash.h>
14#include <linux/sunrpc/clnt.h> 15#include <linux/sunrpc/clnt.h>
15#include <linux/spinlock.h> 16#include <linux/spinlock.h>
16 17
@@ -219,6 +220,9 @@ rpcauth_destroy_credcache(struct rpc_auth *auth)
219} 220}
220EXPORT_SYMBOL_GPL(rpcauth_destroy_credcache); 221EXPORT_SYMBOL_GPL(rpcauth_destroy_credcache);
221 222
223
224#define RPC_AUTH_EXPIRY_MORATORIUM (60 * HZ)
225
222/* 226/*
223 * Remove stale credentials. Avoid sleeping inside the loop. 227 * Remove stale credentials. Avoid sleeping inside the loop.
224 */ 228 */
@@ -227,6 +231,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
227{ 231{
228 spinlock_t *cache_lock; 232 spinlock_t *cache_lock;
229 struct rpc_cred *cred; 233 struct rpc_cred *cred;
234 unsigned long expired = jiffies - RPC_AUTH_EXPIRY_MORATORIUM;
230 235
231 while (!list_empty(&cred_unused)) { 236 while (!list_empty(&cred_unused)) {
232 cred = list_entry(cred_unused.next, struct rpc_cred, cr_lru); 237 cred = list_entry(cred_unused.next, struct rpc_cred, cr_lru);
@@ -234,6 +239,10 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
234 number_cred_unused--; 239 number_cred_unused--;
235 if (atomic_read(&cred->cr_count) != 0) 240 if (atomic_read(&cred->cr_count) != 0)
236 continue; 241 continue;
242 /* Enforce a 5 second garbage collection moratorium */
243 if (time_in_range(cred->cr_expire, expired, jiffies) &&
244 test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
245 continue;
237 cache_lock = &cred->cr_auth->au_credcache->lock; 246 cache_lock = &cred->cr_auth->au_credcache->lock;
238 spin_lock(cache_lock); 247 spin_lock(cache_lock);
239 if (atomic_read(&cred->cr_count) == 0) { 248 if (atomic_read(&cred->cr_count) == 0) {
@@ -280,10 +289,9 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
280 struct hlist_node *pos; 289 struct hlist_node *pos;
281 struct rpc_cred *cred = NULL, 290 struct rpc_cred *cred = NULL,
282 *entry, *new; 291 *entry, *new;
283 int nr = 0; 292 unsigned int nr;
284 293
285 if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) 294 nr = hash_long(acred->uid, RPC_CREDCACHE_HASHBITS);
286 nr = acred->uid & RPC_CREDCACHE_MASK;
287 295
288 rcu_read_lock(); 296 rcu_read_lock();
289 hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) { 297 hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) {
@@ -356,7 +364,6 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
356 put_group_info(acred.group_info); 364 put_group_info(acred.group_info);
357 return ret; 365 return ret;
358} 366}
359EXPORT_SYMBOL_GPL(rpcauth_lookupcred);
360 367
361void 368void
362rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, 369rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
@@ -375,41 +382,58 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
375} 382}
376EXPORT_SYMBOL_GPL(rpcauth_init_cred); 383EXPORT_SYMBOL_GPL(rpcauth_init_cred);
377 384
378struct rpc_cred * 385void
379rpcauth_bindcred(struct rpc_task *task) 386rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
387{
388 task->tk_msg.rpc_cred = get_rpccred(cred);
389 dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid,
390 cred->cr_auth->au_ops->au_name, cred);
391}
392EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred);
393
394static void
395rpcauth_bind_root_cred(struct rpc_task *task)
380{ 396{
381 struct rpc_auth *auth = task->tk_client->cl_auth; 397 struct rpc_auth *auth = task->tk_client->cl_auth;
382 struct auth_cred acred = { 398 struct auth_cred acred = {
383 .uid = current->fsuid, 399 .uid = 0,
384 .gid = current->fsgid, 400 .gid = 0,
385 .group_info = current->group_info,
386 }; 401 };
387 struct rpc_cred *ret; 402 struct rpc_cred *ret;
388 int flags = 0;
389 403
390 dprintk("RPC: %5u looking up %s cred\n", 404 dprintk("RPC: %5u looking up %s cred\n",
391 task->tk_pid, task->tk_client->cl_auth->au_ops->au_name); 405 task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
392 get_group_info(acred.group_info); 406 ret = auth->au_ops->lookup_cred(auth, &acred, 0);
393 if (task->tk_flags & RPC_TASK_ROOTCREDS) 407 if (!IS_ERR(ret))
394 flags |= RPCAUTH_LOOKUP_ROOTCREDS; 408 task->tk_msg.rpc_cred = ret;
395 ret = auth->au_ops->lookup_cred(auth, &acred, flags); 409 else
410 task->tk_status = PTR_ERR(ret);
411}
412
413static void
414rpcauth_bind_new_cred(struct rpc_task *task)
415{
416 struct rpc_auth *auth = task->tk_client->cl_auth;
417 struct rpc_cred *ret;
418
419 dprintk("RPC: %5u looking up %s cred\n",
420 task->tk_pid, auth->au_ops->au_name);
421 ret = rpcauth_lookupcred(auth, 0);
396 if (!IS_ERR(ret)) 422 if (!IS_ERR(ret))
397 task->tk_msg.rpc_cred = ret; 423 task->tk_msg.rpc_cred = ret;
398 else 424 else
399 task->tk_status = PTR_ERR(ret); 425 task->tk_status = PTR_ERR(ret);
400 put_group_info(acred.group_info);
401 return ret;
402} 426}
403 427
404void 428void
405rpcauth_holdcred(struct rpc_task *task) 429rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags)
406{ 430{
407 struct rpc_cred *cred = task->tk_msg.rpc_cred; 431 if (cred != NULL)
408 if (cred != NULL) { 432 cred->cr_ops->crbind(task, cred);
409 get_rpccred(cred); 433 else if (flags & RPC_TASK_ROOTCREDS)
410 dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid, 434 rpcauth_bind_root_cred(task);
411 cred->cr_auth->au_ops->au_name, cred); 435 else
412 } 436 rpcauth_bind_new_cred(task);
413} 437}
414 438
415void 439void
@@ -550,6 +574,7 @@ static struct shrinker rpc_cred_shrinker = {
550void __init rpcauth_init_module(void) 574void __init rpcauth_init_module(void)
551{ 575{
552 rpc_init_authunix(); 576 rpc_init_authunix();
577 rpc_init_generic_auth();
553 register_shrinker(&rpc_cred_shrinker); 578 register_shrinker(&rpc_cred_shrinker);
554} 579}
555 580
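
Note: the rpcauth_lookup_credcache() hunk above switches the bucket choice from masking the uid to hash_long(), so consecutive uids no longer cluster in adjacent buckets. A hedged user-space sketch of the same multiplicative-hash idea (the constant below is the common 32-bit golden-ratio value, chosen for illustration rather than copied from the kernel's hash.h):

#include <stdio.h>
#include <stdint.h>

#define CREDCACHE_HASHBITS 4	/* illustrative: 16 hash buckets */

/* Multiply by a large odd constant and keep the top bits, so nearby
 * values are spread across the table instead of filling one bucket. */
static unsigned int hash_uid(uint32_t uid, unsigned int bits)
{
	return (uint32_t)(uid * 0x9E3779B9u) >> (32 - bits);
}

int main(void)
{
	for (uint32_t uid = 1000; uid < 1008; uid++)
		printf("uid %u -> bucket %u\n",
		       (unsigned int)uid, hash_uid(uid, CREDCACHE_HASHBITS));
	return 0;
}
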
diff --git a/net/sunrpc/auth_generic.c b/net/sunrpc/auth_generic.c
new file mode 100644
index 000000000000..d927d9f57412
--- /dev/null
+++ b/net/sunrpc/auth_generic.c
@@ -0,0 +1,177 @@
1/*
2 * Generic RPC credential
3 *
4 * Copyright (C) 2008, Trond Myklebust <Trond.Myklebust@netapp.com>
5 */
6
7#include <linux/err.h>
8#include <linux/types.h>
9#include <linux/module.h>
10#include <linux/sched.h>
11#include <linux/sunrpc/auth.h>
12#include <linux/sunrpc/clnt.h>
13#include <linux/sunrpc/debug.h>
14#include <linux/sunrpc/sched.h>
15
16#ifdef RPC_DEBUG
17# define RPCDBG_FACILITY RPCDBG_AUTH
18#endif
19
20#define RPC_ANONYMOUS_USERID ((uid_t)-2)
21#define RPC_ANONYMOUS_GROUPID ((gid_t)-2)
22
23struct generic_cred {
24 struct rpc_cred gc_base;
25 struct auth_cred acred;
26};
27
28static struct rpc_auth generic_auth;
29static struct rpc_cred_cache generic_cred_cache;
30static const struct rpc_credops generic_credops;
31
32/*
33 * Public call interface
34 */
35struct rpc_cred *rpc_lookup_cred(void)
36{
37 return rpcauth_lookupcred(&generic_auth, 0);
38}
39EXPORT_SYMBOL_GPL(rpc_lookup_cred);
40
41/*
42 * Public call interface for looking up machine creds.
43 */
44struct rpc_cred *rpc_lookup_machine_cred(void)
45{
46 struct auth_cred acred = {
47 .uid = RPC_ANONYMOUS_USERID,
48 .gid = RPC_ANONYMOUS_GROUPID,
49 .machine_cred = 1,
50 };
51
52 dprintk("RPC: looking up machine cred\n");
53 return generic_auth.au_ops->lookup_cred(&generic_auth, &acred, 0);
54}
55EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);
56
57static void
58generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
59{
60 struct rpc_auth *auth = task->tk_client->cl_auth;
61 struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred;
62 struct rpc_cred *ret;
63
64 ret = auth->au_ops->lookup_cred(auth, acred, 0);
65 if (!IS_ERR(ret))
66 task->tk_msg.rpc_cred = ret;
67 else
68 task->tk_status = PTR_ERR(ret);
69}
70
71/*
72 * Lookup generic creds for current process
73 */
74static struct rpc_cred *
75generic_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
76{
77 return rpcauth_lookup_credcache(&generic_auth, acred, flags);
78}
79
80static struct rpc_cred *
81generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
82{
83 struct generic_cred *gcred;
84
85 gcred = kmalloc(sizeof(*gcred), GFP_KERNEL);
86 if (gcred == NULL)
87 return ERR_PTR(-ENOMEM);
88
89 rpcauth_init_cred(&gcred->gc_base, acred, &generic_auth, &generic_credops);
90 gcred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
91
92 gcred->acred.uid = acred->uid;
93 gcred->acred.gid = acred->gid;
94 gcred->acred.group_info = acred->group_info;
95 if (gcred->acred.group_info != NULL)
96 get_group_info(gcred->acred.group_info);
97 gcred->acred.machine_cred = acred->machine_cred;
98
99 dprintk("RPC: allocated %s cred %p for uid %d gid %d\n",
100 gcred->acred.machine_cred ? "machine" : "generic",
101 gcred, acred->uid, acred->gid);
102 return &gcred->gc_base;
103}
104
105static void
106generic_free_cred(struct rpc_cred *cred)
107{
108 struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base);
109
110 dprintk("RPC: generic_free_cred %p\n", gcred);
111 if (gcred->acred.group_info != NULL)
112 put_group_info(gcred->acred.group_info);
113 kfree(gcred);
114}
115
116static void
117generic_free_cred_callback(struct rcu_head *head)
118{
119 struct rpc_cred *cred = container_of(head, struct rpc_cred, cr_rcu);
120 generic_free_cred(cred);
121}
122
123static void
124generic_destroy_cred(struct rpc_cred *cred)
125{
126 call_rcu(&cred->cr_rcu, generic_free_cred_callback);
127}
128
129/*
130 * Match credentials against current process creds.
131 */
132static int
133generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
134{
135 struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base);
136
137 if (gcred->acred.uid != acred->uid ||
138 gcred->acred.gid != acred->gid ||
139 gcred->acred.group_info != acred->group_info ||
140 gcred->acred.machine_cred != acred->machine_cred)
141 return 0;
142 return 1;
143}
144
145void __init rpc_init_generic_auth(void)
146{
147 spin_lock_init(&generic_cred_cache.lock);
148}
149
150void __exit rpc_destroy_generic_auth(void)
151{
152 rpcauth_clear_credcache(&generic_cred_cache);
153}
154
155static struct rpc_cred_cache generic_cred_cache = {
156 {{ NULL, },},
157};
158
159static const struct rpc_authops generic_auth_ops = {
160 .owner = THIS_MODULE,
161 .au_name = "Generic",
162 .lookup_cred = generic_lookup_cred,
163 .crcreate = generic_create_cred,
164};
165
166static struct rpc_auth generic_auth = {
167 .au_ops = &generic_auth_ops,
168 .au_count = ATOMIC_INIT(0),
169 .au_credcache = &generic_cred_cache,
170};
171
172static const struct rpc_credops generic_credops = {
173 .cr_name = "Generic cred",
174 .crdestroy = generic_destroy_cred,
175 .crbind = generic_bind_cred,
176 .crmatch = generic_match,
177};
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 5828e5c060ca..cc12d5f5d5da 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -114,27 +114,14 @@ static void
114gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx) 114gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
115{ 115{
116 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 116 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
117 struct gss_cl_ctx *old;
118 117
119 old = gss_cred->gc_ctx; 118 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
119 return;
120 gss_get_ctx(ctx);
120 rcu_assign_pointer(gss_cred->gc_ctx, ctx); 121 rcu_assign_pointer(gss_cred->gc_ctx, ctx);
121 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags); 122 set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
123 smp_mb__before_clear_bit();
122 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags); 124 clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
123 if (old)
124 gss_put_ctx(old);
125}
126
127static int
128gss_cred_is_uptodate_ctx(struct rpc_cred *cred)
129{
130 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
131 int res = 0;
132
133 rcu_read_lock();
134 if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) && gss_cred->gc_ctx)
135 res = 1;
136 rcu_read_unlock();
137 return res;
138} 125}
139 126
140static const void * 127static const void *
@@ -266,6 +253,7 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
266 BUG_ON(!list_empty(&gss_msg->list)); 253 BUG_ON(!list_empty(&gss_msg->list));
267 if (gss_msg->ctx != NULL) 254 if (gss_msg->ctx != NULL)
268 gss_put_ctx(gss_msg->ctx); 255 gss_put_ctx(gss_msg->ctx);
256 rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
269 kfree(gss_msg); 257 kfree(gss_msg);
270} 258}
271 259
@@ -339,7 +327,7 @@ gss_upcall_callback(struct rpc_task *task)
339 327
340 spin_lock(&inode->i_lock); 328 spin_lock(&inode->i_lock);
341 if (gss_msg->ctx) 329 if (gss_msg->ctx)
342 gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx)); 330 gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
343 else 331 else
344 task->tk_status = gss_msg->msg.errno; 332 task->tk_status = gss_msg->msg.errno;
345 gss_cred->gc_upcall = NULL; 333 gss_cred->gc_upcall = NULL;
@@ -370,9 +358,16 @@ gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
370static struct gss_upcall_msg * 358static struct gss_upcall_msg *
371gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred) 359gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
372{ 360{
361 struct gss_cred *gss_cred = container_of(cred,
362 struct gss_cred, gc_base);
373 struct gss_upcall_msg *gss_new, *gss_msg; 363 struct gss_upcall_msg *gss_new, *gss_msg;
364 uid_t uid = cred->cr_uid;
374 365
375 gss_new = gss_alloc_msg(gss_auth, cred->cr_uid); 366 /* Special case: rpc.gssd assumes that uid == 0 implies machine creds */
367 if (gss_cred->gc_machine_cred != 0)
368 uid = 0;
369
370 gss_new = gss_alloc_msg(gss_auth, uid);
376 if (gss_new == NULL) 371 if (gss_new == NULL)
377 return ERR_PTR(-ENOMEM); 372 return ERR_PTR(-ENOMEM);
378 gss_msg = gss_add_msg(gss_auth, gss_new); 373 gss_msg = gss_add_msg(gss_auth, gss_new);
@@ -408,13 +403,17 @@ gss_refresh_upcall(struct rpc_task *task)
408 } 403 }
409 spin_lock(&inode->i_lock); 404 spin_lock(&inode->i_lock);
410 if (gss_cred->gc_upcall != NULL) 405 if (gss_cred->gc_upcall != NULL)
411 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL); 406 rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
412 else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) { 407 else if (gss_msg->ctx != NULL) {
408 gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
409 gss_cred->gc_upcall = NULL;
410 rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
411 } else if (gss_msg->msg.errno >= 0) {
413 task->tk_timeout = 0; 412 task->tk_timeout = 0;
414 gss_cred->gc_upcall = gss_msg; 413 gss_cred->gc_upcall = gss_msg;
415 /* gss_upcall_callback will release the reference to gss_upcall_msg */ 414 /* gss_upcall_callback will release the reference to gss_upcall_msg */
416 atomic_inc(&gss_msg->count); 415 atomic_inc(&gss_msg->count);
417 rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL); 416 rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
418 } else 417 } else
419 err = gss_msg->msg.errno; 418 err = gss_msg->msg.errno;
420 spin_unlock(&inode->i_lock); 419 spin_unlock(&inode->i_lock);
@@ -454,7 +453,7 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
454 schedule(); 453 schedule();
455 } 454 }
456 if (gss_msg->ctx) 455 if (gss_msg->ctx)
457 gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx)); 456 gss_cred_set_ctx(cred, gss_msg->ctx);
458 else 457 else
459 err = gss_msg->msg.errno; 458 err = gss_msg->msg.errno;
460 spin_unlock(&inode->i_lock); 459 spin_unlock(&inode->i_lock);
@@ -709,7 +708,7 @@ gss_destroying_context(struct rpc_cred *cred)
709 struct rpc_task *task; 708 struct rpc_task *task;
710 709
711 if (gss_cred->gc_ctx == NULL || 710 if (gss_cred->gc_ctx == NULL ||
712 gss_cred->gc_ctx->gc_proc == RPC_GSS_PROC_DESTROY) 711 test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
713 return 0; 712 return 0;
714 713
715 gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY; 714 gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
@@ -719,7 +718,7 @@ gss_destroying_context(struct rpc_cred *cred)
719 * by the RPC call or by the put_rpccred() below */ 718 * by the RPC call or by the put_rpccred() below */
720 get_rpccred(cred); 719 get_rpccred(cred);
721 720
722 task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC); 721 task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
723 if (!IS_ERR(task)) 722 if (!IS_ERR(task))
724 rpc_put_task(task); 723 rpc_put_task(task);
725 724
@@ -817,6 +816,7 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
817 */ 816 */
818 cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW; 817 cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
819 cred->gc_service = gss_auth->service; 818 cred->gc_service = gss_auth->service;
819 cred->gc_machine_cred = acred->machine_cred;
820 kref_get(&gss_auth->kref); 820 kref_get(&gss_auth->kref);
821 return &cred->gc_base; 821 return &cred->gc_base;
822 822
@@ -843,17 +843,16 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
843{ 843{
844 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base); 844 struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
845 845
846 /* 846 if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
847 * If the searchflags have set RPCAUTH_LOOKUP_NEW, then
848 * we don't really care if the credential has expired or not,
849 * since the caller should be prepared to reinitialise it.
850 */
851 if ((flags & RPCAUTH_LOOKUP_NEW) && test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
852 goto out; 847 goto out;
853 /* Don't match with creds that have expired. */ 848 /* Don't match with creds that have expired. */
854 if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry)) 849 if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
850 return 0;
851 if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
855 return 0; 852 return 0;
856out: 853out:
854 if (acred->machine_cred != gss_cred->gc_machine_cred)
855 return 0;
857 return (rc->cr_uid == acred->uid); 856 return (rc->cr_uid == acred->uid);
858} 857}
859 858
@@ -917,16 +916,48 @@ out_put_ctx:
917 return NULL; 916 return NULL;
918} 917}
919 918
919static int gss_renew_cred(struct rpc_task *task)
920{
921 struct rpc_cred *oldcred = task->tk_msg.rpc_cred;
922 struct gss_cred *gss_cred = container_of(oldcred,
923 struct gss_cred,
924 gc_base);
925 struct rpc_auth *auth = oldcred->cr_auth;
926 struct auth_cred acred = {
927 .uid = oldcred->cr_uid,
928 .machine_cred = gss_cred->gc_machine_cred,
929 };
930 struct rpc_cred *new;
931
932 new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
933 if (IS_ERR(new))
934 return PTR_ERR(new);
935 task->tk_msg.rpc_cred = new;
936 put_rpccred(oldcred);
937 return 0;
938}
939
920/* 940/*
921* Refresh credentials. XXX - finish 941* Refresh credentials. XXX - finish
922*/ 942*/
923static int 943static int
924gss_refresh(struct rpc_task *task) 944gss_refresh(struct rpc_task *task)
925{ 945{
946 struct rpc_cred *cred = task->tk_msg.rpc_cred;
947 int ret = 0;
948
949 if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
950 !test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
951 ret = gss_renew_cred(task);
952 if (ret < 0)
953 goto out;
954 cred = task->tk_msg.rpc_cred;
955 }
926 956
927 if (!gss_cred_is_uptodate_ctx(task->tk_msg.rpc_cred)) 957 if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
928 return gss_refresh_upcall(task); 958 ret = gss_refresh_upcall(task);
929 return 0; 959out:
960 return ret;
930} 961}
931 962
932/* Dummy refresh routine: used only when destroying the context */ 963/* Dummy refresh routine: used only when destroying the context */
@@ -1286,9 +1317,7 @@ out:
1286static const struct rpc_authops authgss_ops = { 1317static const struct rpc_authops authgss_ops = {
1287 .owner = THIS_MODULE, 1318 .owner = THIS_MODULE,
1288 .au_flavor = RPC_AUTH_GSS, 1319 .au_flavor = RPC_AUTH_GSS,
1289#ifdef RPC_DEBUG
1290 .au_name = "RPCSEC_GSS", 1320 .au_name = "RPCSEC_GSS",
1291#endif
1292 .create = gss_create, 1321 .create = gss_create,
1293 .destroy = gss_destroy, 1322 .destroy = gss_destroy,
1294 .lookup_cred = gss_lookup_cred, 1323 .lookup_cred = gss_lookup_cred,
@@ -1299,6 +1328,7 @@ static const struct rpc_credops gss_credops = {
1299 .cr_name = "AUTH_GSS", 1328 .cr_name = "AUTH_GSS",
1300 .crdestroy = gss_destroy_cred, 1329 .crdestroy = gss_destroy_cred,
1301 .cr_init = gss_cred_init, 1330 .cr_init = gss_cred_init,
1331 .crbind = rpcauth_generic_bind_cred,
1302 .crmatch = gss_match, 1332 .crmatch = gss_match,
1303 .crmarshal = gss_marshal, 1333 .crmarshal = gss_marshal,
1304 .crrefresh = gss_refresh, 1334 .crrefresh = gss_refresh,
@@ -1310,6 +1340,7 @@ static const struct rpc_credops gss_credops = {
1310static const struct rpc_credops gss_nullops = { 1340static const struct rpc_credops gss_nullops = {
1311 .cr_name = "AUTH_GSS", 1341 .cr_name = "AUTH_GSS",
1312 .crdestroy = gss_destroy_cred, 1342 .crdestroy = gss_destroy_cred,
1343 .crbind = rpcauth_generic_bind_cred,
1313 .crmatch = gss_match, 1344 .crmatch = gss_match,
1314 .crmarshal = gss_marshal, 1345 .crmarshal = gss_marshal,
1315 .crrefresh = gss_refresh_null, 1346 .crrefresh = gss_refresh_null,
diff --git a/net/sunrpc/auth_gss/gss_generic_token.c b/net/sunrpc/auth_gss/gss_generic_token.c
index ea8c92ecdae5..d83b881685fe 100644
--- a/net/sunrpc/auth_gss/gss_generic_token.c
+++ b/net/sunrpc/auth_gss/gss_generic_token.c
@@ -148,7 +148,7 @@ int
148g_token_size(struct xdr_netobj *mech, unsigned int body_size) 148g_token_size(struct xdr_netobj *mech, unsigned int body_size)
149{ 149{
150 /* set body_size to sequence contents size */ 150 /* set body_size to sequence contents size */
151 body_size += 4 + (int) mech->len; /* NEED overflow check */ 151 body_size += 2 + (int) mech->len; /* NEED overflow check */
152 return(1 + der_length_size(body_size) + body_size); 152 return(1 + der_length_size(body_size) + body_size);
153} 153}
154 154
@@ -161,7 +161,7 @@ void
161g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf) 161g_make_token_header(struct xdr_netobj *mech, int body_size, unsigned char **buf)
162{ 162{
163 *(*buf)++ = 0x60; 163 *(*buf)++ = 0x60;
164 der_write_length(buf, 4 + mech->len + body_size); 164 der_write_length(buf, 2 + mech->len + body_size);
165 *(*buf)++ = 0x06; 165 *(*buf)++ = 0x06;
166 *(*buf)++ = (unsigned char) mech->len; 166 *(*buf)++ = (unsigned char) mech->len;
167 TWRITE_STR(*buf, mech->data, ((int) mech->len)); 167 TWRITE_STR(*buf, mech->data, ((int) mech->len));
diff --git a/net/sunrpc/auth_gss/gss_krb5_crypto.c b/net/sunrpc/auth_gss/gss_krb5_crypto.c
index 0dd792338fa9..1d52308ca324 100644
--- a/net/sunrpc/auth_gss/gss_krb5_crypto.c
+++ b/net/sunrpc/auth_gss/gss_krb5_crypto.c
@@ -66,8 +66,8 @@ krb5_encrypt(
66 goto out; 66 goto out;
67 67
68 if (crypto_blkcipher_ivsize(tfm) > 16) { 68 if (crypto_blkcipher_ivsize(tfm) > 16) {
69 dprintk("RPC: gss_k5encrypt: tfm iv size to large %d\n", 69 dprintk("RPC: gss_k5encrypt: tfm iv size too large %d\n",
70 crypto_blkcipher_ivsize(tfm)); 70 crypto_blkcipher_ivsize(tfm));
71 goto out; 71 goto out;
72 } 72 }
73 73
@@ -102,7 +102,7 @@ krb5_decrypt(
102 goto out; 102 goto out;
103 103
104 if (crypto_blkcipher_ivsize(tfm) > 16) { 104 if (crypto_blkcipher_ivsize(tfm) > 16) {
105 dprintk("RPC: gss_k5decrypt: tfm iv size to large %d\n", 105 dprintk("RPC: gss_k5decrypt: tfm iv size too large %d\n",
106 crypto_blkcipher_ivsize(tfm)); 106 crypto_blkcipher_ivsize(tfm));
107 goto out; 107 goto out;
108 } 108 }
diff --git a/net/sunrpc/auth_gss/gss_krb5_seal.c b/net/sunrpc/auth_gss/gss_krb5_seal.c
index dedcbd6108f4..5f1d36dfbcf7 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seal.c
@@ -87,10 +87,10 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
87 87
88 now = get_seconds(); 88 now = get_seconds();
89 89
90 token->len = g_token_size(&ctx->mech_used, 22); 90 token->len = g_token_size(&ctx->mech_used, 24);
91 91
92 ptr = token->data; 92 ptr = token->data;
93 g_make_token_header(&ctx->mech_used, 22, &ptr); 93 g_make_token_header(&ctx->mech_used, 24, &ptr);
94 94
95 *ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff); 95 *ptr++ = (unsigned char) ((KG_TOK_MIC_MSG>>8)&0xff);
96 *ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff); 96 *ptr++ = (unsigned char) (KG_TOK_MIC_MSG&0xff);
@@ -109,15 +109,14 @@ gss_get_mic_kerberos(struct gss_ctx *gss_ctx, struct xdr_buf *text,
109 md5cksum.data, md5cksum.len)) 109 md5cksum.data, md5cksum.len))
110 return GSS_S_FAILURE; 110 return GSS_S_FAILURE;
111 111
112 memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH, 112 memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - 8, 8);
113 KRB5_CKSUM_LENGTH);
114 113
115 spin_lock(&krb5_seq_lock); 114 spin_lock(&krb5_seq_lock);
116 seq_send = ctx->seq_send++; 115 seq_send = ctx->seq_send++;
117 spin_unlock(&krb5_seq_lock); 116 spin_unlock(&krb5_seq_lock);
118 117
119 if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff, 118 if (krb5_make_seq_num(ctx->seq, ctx->initiate ? 0 : 0xff,
120 ctx->seq_send, krb5_hdr + 16, krb5_hdr + 8)) 119 seq_send, krb5_hdr + 16, krb5_hdr + 8))
121 return GSS_S_FAILURE; 120 return GSS_S_FAILURE;
122 121
123 return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; 122 return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE;
diff --git a/net/sunrpc/auth_gss/gss_krb5_seqnum.c b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
index 43f3421f1e6a..f160be6c1a46 100644
--- a/net/sunrpc/auth_gss/gss_krb5_seqnum.c
+++ b/net/sunrpc/auth_gss/gss_krb5_seqnum.c
@@ -43,7 +43,7 @@
43s32 43s32
44krb5_make_seq_num(struct crypto_blkcipher *key, 44krb5_make_seq_num(struct crypto_blkcipher *key,
45 int direction, 45 int direction,
46 s32 seqnum, 46 u32 seqnum,
47 unsigned char *cksum, unsigned char *buf) 47 unsigned char *cksum, unsigned char *buf)
48{ 48{
49 unsigned char plain[8]; 49 unsigned char plain[8];
@@ -65,7 +65,7 @@ s32
65krb5_get_seq_num(struct crypto_blkcipher *key, 65krb5_get_seq_num(struct crypto_blkcipher *key,
66 unsigned char *cksum, 66 unsigned char *cksum,
67 unsigned char *buf, 67 unsigned char *buf,
68 int *direction, s32 * seqnum) 68 int *direction, u32 *seqnum)
69{ 69{
70 s32 code; 70 s32 code;
71 unsigned char plain[8]; 71 unsigned char plain[8];
diff --git a/net/sunrpc/auth_gss/gss_krb5_unseal.c b/net/sunrpc/auth_gss/gss_krb5_unseal.c
index e30a993466bc..d91a5d004803 100644
--- a/net/sunrpc/auth_gss/gss_krb5_unseal.c
+++ b/net/sunrpc/auth_gss/gss_krb5_unseal.c
@@ -82,7 +82,7 @@ gss_verify_mic_kerberos(struct gss_ctx *gss_ctx,
82 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata}; 82 struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
83 s32 now; 83 s32 now;
84 int direction; 84 int direction;
85 s32 seqnum; 85 u32 seqnum;
86 unsigned char *ptr = (unsigned char *)read_token->data; 86 unsigned char *ptr = (unsigned char *)read_token->data;
87 int bodysize; 87 int bodysize;
88 88
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 3bdc527ee64a..b00b1b426301 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -137,7 +137,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
137 BUG_ON((buf->len - offset) % blocksize); 137 BUG_ON((buf->len - offset) % blocksize);
138 plainlen = blocksize + buf->len - offset; 138 plainlen = blocksize + buf->len - offset;
139 139
140 headlen = g_token_size(&kctx->mech_used, 22 + plainlen) - 140 headlen = g_token_size(&kctx->mech_used, 24 + plainlen) -
141 (buf->len - offset); 141 (buf->len - offset);
142 142
143 ptr = buf->head[0].iov_base + offset; 143 ptr = buf->head[0].iov_base + offset;
@@ -149,7 +149,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
149 buf->len += headlen; 149 buf->len += headlen;
150 BUG_ON((buf->len - offset - headlen) % blocksize); 150 BUG_ON((buf->len - offset - headlen) % blocksize);
151 151
152 g_make_token_header(&kctx->mech_used, 22 + plainlen, &ptr); 152 g_make_token_header(&kctx->mech_used, 24 + plainlen, &ptr);
153 153
154 154
155 *ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff); 155 *ptr++ = (unsigned char) ((KG_TOK_WRAP_MSG>>8)&0xff);
@@ -176,9 +176,7 @@ gss_wrap_kerberos(struct gss_ctx *ctx, int offset,
176 if (krb5_encrypt(kctx->seq, NULL, md5cksum.data, 176 if (krb5_encrypt(kctx->seq, NULL, md5cksum.data,
177 md5cksum.data, md5cksum.len)) 177 md5cksum.data, md5cksum.len))
178 return GSS_S_FAILURE; 178 return GSS_S_FAILURE;
179 memcpy(krb5_hdr + 16, 179 memcpy(krb5_hdr + 16, md5cksum.data + md5cksum.len - 8, 8);
180 md5cksum.data + md5cksum.len - KRB5_CKSUM_LENGTH,
181 KRB5_CKSUM_LENGTH);
182 180
183 spin_lock(&krb5_seq_lock); 181 spin_lock(&krb5_seq_lock);
184 seq_send = kctx->seq_send++; 182 seq_send = kctx->seq_send++;
diff --git a/net/sunrpc/auth_gss/gss_spkm3_seal.c b/net/sunrpc/auth_gss/gss_spkm3_seal.c
index abf17ce2e3b1..c832712f8d55 100644
--- a/net/sunrpc/auth_gss/gss_spkm3_seal.c
+++ b/net/sunrpc/auth_gss/gss_spkm3_seal.c
@@ -107,10 +107,10 @@ spkm3_make_token(struct spkm3_ctx *ctx,
107 tokenlen = 10 + ctxelen + 1 + md5elen + 1; 107 tokenlen = 10 + ctxelen + 1 + md5elen + 1;
108 108
109 /* Create token header using generic routines */ 109 /* Create token header using generic routines */
110 token->len = g_token_size(&ctx->mech_used, tokenlen); 110 token->len = g_token_size(&ctx->mech_used, tokenlen + 2);
111 111
112 ptr = token->data; 112 ptr = token->data;
113 g_make_token_header(&ctx->mech_used, tokenlen, &ptr); 113 g_make_token_header(&ctx->mech_used, tokenlen + 2, &ptr);
114 114
115 spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit); 115 spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit);
116 } else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */ 116 } else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 481f984e9a22..5905d56737d6 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1146,7 +1146,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1146 case RPC_GSS_SVC_INTEGRITY: 1146 case RPC_GSS_SVC_INTEGRITY:
1147 if (unwrap_integ_data(&rqstp->rq_arg, 1147 if (unwrap_integ_data(&rqstp->rq_arg,
1148 gc->gc_seq, rsci->mechctx)) 1148 gc->gc_seq, rsci->mechctx))
1149 goto auth_err; 1149 goto garbage_args;
1150 /* placeholders for length and seq. number: */ 1150 /* placeholders for length and seq. number: */
1151 svc_putnl(resv, 0); 1151 svc_putnl(resv, 0);
1152 svc_putnl(resv, 0); 1152 svc_putnl(resv, 0);
@@ -1154,7 +1154,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1154 case RPC_GSS_SVC_PRIVACY: 1154 case RPC_GSS_SVC_PRIVACY:
1155 if (unwrap_priv_data(rqstp, &rqstp->rq_arg, 1155 if (unwrap_priv_data(rqstp, &rqstp->rq_arg,
1156 gc->gc_seq, rsci->mechctx)) 1156 gc->gc_seq, rsci->mechctx))
1157 goto auth_err; 1157 goto garbage_args;
1158 /* placeholders for length and seq. number: */ 1158 /* placeholders for length and seq. number: */
1159 svc_putnl(resv, 0); 1159 svc_putnl(resv, 0);
1160 svc_putnl(resv, 0); 1160 svc_putnl(resv, 0);
@@ -1169,6 +1169,11 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1169 ret = SVC_OK; 1169 ret = SVC_OK;
1170 goto out; 1170 goto out;
1171 } 1171 }
1172garbage_args:
1173 /* Restore write pointer to its original value: */
1174 xdr_ressize_check(rqstp, reject_stat);
1175 ret = SVC_GARBAGE;
1176 goto out;
1172auth_err: 1177auth_err:
1173 /* Restore write pointer to its original value: */ 1178 /* Restore write pointer to its original value: */
1174 xdr_ressize_check(rqstp, reject_stat); 1179 xdr_ressize_check(rqstp, reject_stat);
diff --git a/net/sunrpc/auth_null.c b/net/sunrpc/auth_null.c
index 537d0e8589dd..c70dd7f5258e 100644
--- a/net/sunrpc/auth_null.c
+++ b/net/sunrpc/auth_null.c
@@ -104,9 +104,7 @@ nul_validate(struct rpc_task *task, __be32 *p)
104const struct rpc_authops authnull_ops = { 104const struct rpc_authops authnull_ops = {
105 .owner = THIS_MODULE, 105 .owner = THIS_MODULE,
106 .au_flavor = RPC_AUTH_NULL, 106 .au_flavor = RPC_AUTH_NULL,
107#ifdef RPC_DEBUG
108 .au_name = "NULL", 107 .au_name = "NULL",
109#endif
110 .create = nul_create, 108 .create = nul_create,
111 .destroy = nul_destroy, 109 .destroy = nul_destroy,
112 .lookup_cred = nul_lookup_cred, 110 .lookup_cred = nul_lookup_cred,
@@ -125,6 +123,7 @@ static
125const struct rpc_credops null_credops = { 123const struct rpc_credops null_credops = {
126 .cr_name = "AUTH_NULL", 124 .cr_name = "AUTH_NULL",
127 .crdestroy = nul_destroy_cred, 125 .crdestroy = nul_destroy_cred,
126 .crbind = rpcauth_generic_bind_cred,
128 .crmatch = nul_match, 127 .crmatch = nul_match,
129 .crmarshal = nul_marshal, 128 .crmarshal = nul_marshal,
130 .crrefresh = nul_refresh, 129 .crrefresh = nul_refresh,
diff --git a/net/sunrpc/auth_unix.c b/net/sunrpc/auth_unix.c
index 5ed91e5bcee4..44920b90bdc4 100644
--- a/net/sunrpc/auth_unix.c
+++ b/net/sunrpc/auth_unix.c
@@ -60,7 +60,8 @@ static struct rpc_cred *
60unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags) 60unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
61{ 61{
62 struct unx_cred *cred; 62 struct unx_cred *cred;
63 int i; 63 unsigned int groups = 0;
64 unsigned int i;
64 65
65 dprintk("RPC: allocating UNIX cred for uid %d gid %d\n", 66 dprintk("RPC: allocating UNIX cred for uid %d gid %d\n",
66 acred->uid, acred->gid); 67 acred->uid, acred->gid);
@@ -70,21 +71,17 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
70 71
71 rpcauth_init_cred(&cred->uc_base, acred, auth, &unix_credops); 72 rpcauth_init_cred(&cred->uc_base, acred, auth, &unix_credops);
72 cred->uc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE; 73 cred->uc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
73 if (flags & RPCAUTH_LOOKUP_ROOTCREDS) { 74
74 cred->uc_uid = 0; 75 if (acred->group_info != NULL)
75 cred->uc_gid = 0; 76 groups = acred->group_info->ngroups;
76 cred->uc_gids[0] = NOGROUP; 77 if (groups > NFS_NGROUPS)
77 } else { 78 groups = NFS_NGROUPS;
78 int groups = acred->group_info->ngroups; 79
79 if (groups > NFS_NGROUPS) 80 cred->uc_gid = acred->gid;
80 groups = NFS_NGROUPS; 81 for (i = 0; i < groups; i++)
81 82 cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
82 cred->uc_gid = acred->gid; 83 if (i < NFS_NGROUPS)
83 for (i = 0; i < groups; i++) 84 cred->uc_gids[i] = NOGROUP;
84 cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
85 if (i < NFS_NGROUPS)
86 cred->uc_gids[i] = NOGROUP;
87 }
88 85
89 return &cred->uc_base; 86 return &cred->uc_base;
90} 87}
@@ -118,26 +115,21 @@ static int
118unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags) 115unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
119{ 116{
120 struct unx_cred *cred = container_of(rcred, struct unx_cred, uc_base); 117 struct unx_cred *cred = container_of(rcred, struct unx_cred, uc_base);
121 int i; 118 unsigned int groups = 0;
119 unsigned int i;
122 120
123 if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) {
124 int groups;
125 121
126 if (cred->uc_uid != acred->uid 122 if (cred->uc_uid != acred->uid || cred->uc_gid != acred->gid)
127 || cred->uc_gid != acred->gid) 123 return 0;
128 return 0;
129 124
125 if (acred->group_info != NULL)
130 groups = acred->group_info->ngroups; 126 groups = acred->group_info->ngroups;
131 if (groups > NFS_NGROUPS) 127 if (groups > NFS_NGROUPS)
132 groups = NFS_NGROUPS; 128 groups = NFS_NGROUPS;
133 for (i = 0; i < groups ; i++) 129 for (i = 0; i < groups ; i++)
134 if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i)) 130 if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i))
135 return 0; 131 return 0;
136 return 1; 132 return 1;
137 }
138 return (cred->uc_uid == 0
139 && cred->uc_gid == 0
140 && cred->uc_gids[0] == (gid_t) NOGROUP);
141} 133}
142 134
143/* 135/*
@@ -218,9 +210,7 @@ void __init rpc_init_authunix(void)
218const struct rpc_authops authunix_ops = { 210const struct rpc_authops authunix_ops = {
219 .owner = THIS_MODULE, 211 .owner = THIS_MODULE,
220 .au_flavor = RPC_AUTH_UNIX, 212 .au_flavor = RPC_AUTH_UNIX,
221#ifdef RPC_DEBUG
222 .au_name = "UNIX", 213 .au_name = "UNIX",
223#endif
224 .create = unx_create, 214 .create = unx_create,
225 .destroy = unx_destroy, 215 .destroy = unx_destroy,
226 .lookup_cred = unx_lookup_cred, 216 .lookup_cred = unx_lookup_cred,
@@ -245,6 +235,7 @@ static
245const struct rpc_credops unix_credops = { 235const struct rpc_credops unix_credops = {
246 .cr_name = "AUTH_UNIX", 236 .cr_name = "AUTH_UNIX",
247 .crdestroy = unx_destroy_cred, 237 .crdestroy = unx_destroy_cred,
238 .crbind = rpcauth_generic_bind_cred,
248 .crmatch = unx_match, 239 .crmatch = unx_match,
249 .crmarshal = unx_marshal, 240 .crmarshal = unx_marshal,
250 .crrefresh = unx_refresh, 241 .crrefresh = unx_refresh,
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index b5f2786251b9..d75530ff2a6d 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -571,7 +571,6 @@ static int cache_defer_req(struct cache_req *req, struct cache_head *item)
571 return -ETIMEDOUT; 571 return -ETIMEDOUT;
572 572
573 dreq->item = item; 573 dreq->item = item;
574 dreq->recv_time = get_seconds();
575 574
576 spin_lock(&cache_defer_lock); 575 spin_lock(&cache_defer_lock);
577 576
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7b96ff38002f..8945307556ec 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -544,7 +544,7 @@ EXPORT_SYMBOL_GPL(rpc_run_task);
544 * @msg: RPC call parameters 544 * @msg: RPC call parameters
545 * @flags: RPC call flags 545 * @flags: RPC call flags
546 */ 546 */
547int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags) 547int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
548{ 548{
549 struct rpc_task *task; 549 struct rpc_task *task;
550 struct rpc_task_setup task_setup_data = { 550 struct rpc_task_setup task_setup_data = {
@@ -575,7 +575,7 @@ EXPORT_SYMBOL_GPL(rpc_call_sync);
575 * @data: user call data 575 * @data: user call data
576 */ 576 */
577int 577int
578rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags, 578rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
579 const struct rpc_call_ops *tk_ops, void *data) 579 const struct rpc_call_ops *tk_ops, void *data)
580{ 580{
581 struct rpc_task *task; 581 struct rpc_task *task;
@@ -1062,7 +1062,7 @@ call_transmit(struct rpc_task *task)
1062 if (task->tk_msg.rpc_proc->p_decode != NULL) 1062 if (task->tk_msg.rpc_proc->p_decode != NULL)
1063 return; 1063 return;
1064 task->tk_action = rpc_exit_task; 1064 task->tk_action = rpc_exit_task;
1065 rpc_wake_up_task(task); 1065 rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
1066} 1066}
1067 1067
1068/* 1068/*
@@ -1116,7 +1116,8 @@ call_status(struct rpc_task *task)
1116 case -ETIMEDOUT: 1116 case -ETIMEDOUT:
1117 task->tk_action = call_timeout; 1117 task->tk_action = call_timeout;
1118 if (task->tk_client->cl_discrtry) 1118 if (task->tk_client->cl_discrtry)
1119 xprt_force_disconnect(task->tk_xprt); 1119 xprt_conditional_disconnect(task->tk_xprt,
1120 req->rq_connect_cookie);
1120 break; 1121 break;
1121 case -ECONNREFUSED: 1122 case -ECONNREFUSED:
1122 case -ENOTCONN: 1123 case -ENOTCONN:
@@ -1168,6 +1169,11 @@ call_timeout(struct rpc_task *task)
1168 clnt->cl_protname, clnt->cl_server); 1169 clnt->cl_protname, clnt->cl_server);
1169 } 1170 }
1170 rpc_force_rebind(clnt); 1171 rpc_force_rebind(clnt);
1172 /*
1173 * Did our request time out due to an RPCSEC_GSS out-of-sequence
1174 * event? RFC2203 requires the server to drop all such requests.
1175 */
1176 rpcauth_invalcred(task);
1171 1177
1172retry: 1178retry:
1173 clnt->cl_stats->rpcretrans++; 1179 clnt->cl_stats->rpcretrans++;
@@ -1195,18 +1201,6 @@ call_decode(struct rpc_task *task)
1195 task->tk_flags &= ~RPC_CALL_MAJORSEEN; 1201 task->tk_flags &= ~RPC_CALL_MAJORSEEN;
1196 } 1202 }
1197 1203
1198 if (task->tk_status < 12) {
1199 if (!RPC_IS_SOFT(task)) {
1200 task->tk_action = call_bind;
1201 clnt->cl_stats->rpcretrans++;
1202 goto out_retry;
1203 }
1204 dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
1205 clnt->cl_protname, task->tk_status);
1206 task->tk_action = call_timeout;
1207 goto out_retry;
1208 }
1209
1210 /* 1204 /*
1211 * Ensure that we see all writes made by xprt_complete_rqst() 1205 * Ensure that we see all writes made by xprt_complete_rqst()
1212 * before it changed req->rq_received. 1206 * before it changed req->rq_received.
@@ -1218,6 +1212,18 @@ call_decode(struct rpc_task *task)
1218 WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, 1212 WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
1219 sizeof(req->rq_rcv_buf)) != 0); 1213 sizeof(req->rq_rcv_buf)) != 0);
1220 1214
1215 if (req->rq_rcv_buf.len < 12) {
1216 if (!RPC_IS_SOFT(task)) {
1217 task->tk_action = call_bind;
1218 clnt->cl_stats->rpcretrans++;
1219 goto out_retry;
1220 }
1221 dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
1222 clnt->cl_protname, task->tk_status);
1223 task->tk_action = call_timeout;
1224 goto out_retry;
1225 }
1226
1221 /* Verify the RPC header */ 1227 /* Verify the RPC header */
1222 p = call_verify(task); 1228 p = call_verify(task);
1223 if (IS_ERR(p)) { 1229 if (IS_ERR(p)) {
@@ -1236,10 +1242,14 @@ call_decode(struct rpc_task *task)
1236 task->tk_status); 1242 task->tk_status);
1237 return; 1243 return;
1238out_retry: 1244out_retry:
1239 req->rq_received = req->rq_private_buf.len = 0;
1240 task->tk_status = 0; 1245 task->tk_status = 0;
1241 if (task->tk_client->cl_discrtry) 1246 /* Note: call_verify() may have freed the RPC slot */
1242 xprt_force_disconnect(task->tk_xprt); 1247 if (task->tk_rqstp == req) {
1248 req->rq_received = req->rq_rcv_buf.len = 0;
1249 if (task->tk_client->cl_discrtry)
1250 xprt_conditional_disconnect(task->tk_xprt,
1251 req->rq_connect_cookie);
1252 }
1243} 1253}
1244 1254
1245/* 1255/*
@@ -1531,7 +1541,7 @@ void rpc_show_tasks(void)
1531 proc = -1; 1541 proc = -1;
1532 1542
1533 if (RPC_IS_QUEUED(t)) 1543 if (RPC_IS_QUEUED(t))
1534 rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq); 1544 rpc_waitq = rpc_qname(t->tk_waitqueue);
1535 1545
1536 printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n", 1546 printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n",
1537 t->tk_pid, proc, 1547 t->tk_pid, proc,
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 56aa018dce3a..0517967a68bf 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -298,7 +298,7 @@ void rpcb_getport_async(struct rpc_task *task)
298 298
299 /* Put self on queue before sending rpcbind request, in case 299 /* Put self on queue before sending rpcbind request, in case
300 * rpcb_getport_done completes before we return from rpc_run_task */ 300 * rpcb_getport_done completes before we return from rpc_run_task */
301 rpc_sleep_on(&xprt->binding, task, NULL, NULL); 301 rpc_sleep_on(&xprt->binding, task, NULL);
302 302
303 /* Someone else may have bound if we slept */ 303 /* Someone else may have bound if we slept */
304 if (xprt_bound(xprt)) { 304 if (xprt_bound(xprt)) {
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index 4c669121e607..6eab9bf94baf 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -38,9 +38,9 @@ static struct kmem_cache *rpc_buffer_slabp __read_mostly;
38static mempool_t *rpc_task_mempool __read_mostly; 38static mempool_t *rpc_task_mempool __read_mostly;
39static mempool_t *rpc_buffer_mempool __read_mostly; 39static mempool_t *rpc_buffer_mempool __read_mostly;
40 40
41static void __rpc_default_timer(struct rpc_task *task);
42static void rpc_async_schedule(struct work_struct *); 41static void rpc_async_schedule(struct work_struct *);
43static void rpc_release_task(struct rpc_task *task); 42static void rpc_release_task(struct rpc_task *task);
43static void __rpc_queue_timer_fn(unsigned long ptr);
44 44
45/* 45/*
46 * RPC tasks sit here while waiting for conditions to improve. 46 * RPC tasks sit here while waiting for conditions to improve.
@@ -57,41 +57,30 @@ struct workqueue_struct *rpciod_workqueue;
57 * queue->lock and bh_disabled in order to avoid races within 57 * queue->lock and bh_disabled in order to avoid races within
58 * rpc_run_timer(). 58 * rpc_run_timer().
59 */ 59 */
60static inline void 60static void
61__rpc_disable_timer(struct rpc_task *task) 61__rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
62{ 62{
63 if (task->tk_timeout == 0)
64 return;
63 dprintk("RPC: %5u disabling timer\n", task->tk_pid); 65 dprintk("RPC: %5u disabling timer\n", task->tk_pid);
64 task->tk_timeout_fn = NULL;
65 task->tk_timeout = 0; 66 task->tk_timeout = 0;
67 list_del(&task->u.tk_wait.timer_list);
68 if (list_empty(&queue->timer_list.list))
69 del_timer(&queue->timer_list.timer);
66} 70}
67 71
68/* 72static void
69 * Run a timeout function. 73rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)
70 * We use the callback in order to allow __rpc_wake_up_task()
71 * and friends to disable the timer synchronously on SMP systems
72 * without calling del_timer_sync(). The latter could cause a
73 * deadlock if called while we're holding spinlocks...
74 */
75static void rpc_run_timer(struct rpc_task *task)
76{ 74{
77 void (*callback)(struct rpc_task *); 75 queue->timer_list.expires = expires;
78 76 mod_timer(&queue->timer_list.timer, expires);
79 callback = task->tk_timeout_fn;
80 task->tk_timeout_fn = NULL;
81 if (callback && RPC_IS_QUEUED(task)) {
82 dprintk("RPC: %5u running timer\n", task->tk_pid);
83 callback(task);
84 }
85 smp_mb__before_clear_bit();
86 clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
87 smp_mb__after_clear_bit();
88} 77}
89 78
90/* 79/*
91 * Set up a timer for the current task. 80 * Set up a timer for the current task.
92 */ 81 */
93static inline void 82static void
94__rpc_add_timer(struct rpc_task *task, rpc_action timer) 83__rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)
95{ 84{
96 if (!task->tk_timeout) 85 if (!task->tk_timeout)
97 return; 86 return;
@@ -99,27 +88,10 @@ __rpc_add_timer(struct rpc_task *task, rpc_action timer)
99 dprintk("RPC: %5u setting alarm for %lu ms\n", 88 dprintk("RPC: %5u setting alarm for %lu ms\n",
100 task->tk_pid, task->tk_timeout * 1000 / HZ); 89 task->tk_pid, task->tk_timeout * 1000 / HZ);
101 90
102 if (timer) 91 task->u.tk_wait.expires = jiffies + task->tk_timeout;
103 task->tk_timeout_fn = timer; 92 if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.expires))
104 else 93 rpc_set_queue_timer(queue, task->u.tk_wait.expires);
105 task->tk_timeout_fn = __rpc_default_timer; 94 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);
106 set_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate);
107 mod_timer(&task->tk_timer, jiffies + task->tk_timeout);
108}
109
110/*
111 * Delete any timer for the current task. Because we use del_timer_sync(),
112 * this function should never be called while holding queue->lock.
113 */
114static void
115rpc_delete_timer(struct rpc_task *task)
116{
117 if (RPC_IS_QUEUED(task))
118 return;
119 if (test_and_clear_bit(RPC_TASK_HAS_TIMER, &task->tk_runstate)) {
120 del_singleshot_timer_sync(&task->tk_timer);
121 dprintk("RPC: %5u deleting timer\n", task->tk_pid);
122 }
123} 95}
124 96
125/* 97/*
@@ -161,7 +133,7 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *
161 list_add(&task->u.tk_wait.list, &queue->tasks[0]); 133 list_add(&task->u.tk_wait.list, &queue->tasks[0]);
162 else 134 else
163 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]); 135 list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);
164 task->u.tk_wait.rpc_waitq = queue; 136 task->tk_waitqueue = queue;
165 queue->qlen++; 137 queue->qlen++;
166 rpc_set_queued(task); 138 rpc_set_queued(task);
167 139
@@ -181,22 +153,18 @@ static void __rpc_remove_wait_queue_priority(struct rpc_task *task)
181 list_move(&t->u.tk_wait.list, &task->u.tk_wait.list); 153 list_move(&t->u.tk_wait.list, &task->u.tk_wait.list);
182 list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links); 154 list_splice_init(&task->u.tk_wait.links, &t->u.tk_wait.links);
183 } 155 }
184 list_del(&task->u.tk_wait.list);
185} 156}
186 157
187/* 158/*
188 * Remove request from queue. 159 * Remove request from queue.
189 * Note: must be called with spin lock held. 160 * Note: must be called with spin lock held.
190 */ 161 */
191static void __rpc_remove_wait_queue(struct rpc_task *task) 162static void __rpc_remove_wait_queue(struct rpc_wait_queue *queue, struct rpc_task *task)
192{ 163{
193 struct rpc_wait_queue *queue; 164 __rpc_disable_timer(queue, task);
194 queue = task->u.tk_wait.rpc_waitq;
195
196 if (RPC_IS_PRIORITY(queue)) 165 if (RPC_IS_PRIORITY(queue))
197 __rpc_remove_wait_queue_priority(task); 166 __rpc_remove_wait_queue_priority(task);
198 else 167 list_del(&task->u.tk_wait.list);
199 list_del(&task->u.tk_wait.list);
200 queue->qlen--; 168 queue->qlen--;
201 dprintk("RPC: %5u removed from queue %p \"%s\"\n", 169 dprintk("RPC: %5u removed from queue %p \"%s\"\n",
202 task->tk_pid, queue, rpc_qname(queue)); 170 task->tk_pid, queue, rpc_qname(queue));
@@ -229,6 +197,9 @@ static void __rpc_init_priority_wait_queue(struct rpc_wait_queue *queue, const c
229 INIT_LIST_HEAD(&queue->tasks[i]); 197 INIT_LIST_HEAD(&queue->tasks[i]);
230 queue->maxpriority = nr_queues - 1; 198 queue->maxpriority = nr_queues - 1;
231 rpc_reset_waitqueue_priority(queue); 199 rpc_reset_waitqueue_priority(queue);
200 queue->qlen = 0;
201 setup_timer(&queue->timer_list.timer, __rpc_queue_timer_fn, (unsigned long)queue);
202 INIT_LIST_HEAD(&queue->timer_list.list);
232#ifdef RPC_DEBUG 203#ifdef RPC_DEBUG
233 queue->name = qname; 204 queue->name = qname;
234#endif 205#endif
@@ -245,6 +216,12 @@ void rpc_init_wait_queue(struct rpc_wait_queue *queue, const char *qname)
245} 216}
246EXPORT_SYMBOL_GPL(rpc_init_wait_queue); 217EXPORT_SYMBOL_GPL(rpc_init_wait_queue);
247 218
219void rpc_destroy_wait_queue(struct rpc_wait_queue *queue)
220{
221 del_timer_sync(&queue->timer_list.timer);
222}
223EXPORT_SYMBOL_GPL(rpc_destroy_wait_queue);
224
248static int rpc_wait_bit_killable(void *word) 225static int rpc_wait_bit_killable(void *word)
249{ 226{
250 if (fatal_signal_pending(current)) 227 if (fatal_signal_pending(current))
@@ -313,7 +290,6 @@ EXPORT_SYMBOL_GPL(__rpc_wait_for_completion_task);
313 */ 290 */
314static void rpc_make_runnable(struct rpc_task *task) 291static void rpc_make_runnable(struct rpc_task *task)
315{ 292{
316 BUG_ON(task->tk_timeout_fn);
317 rpc_clear_queued(task); 293 rpc_clear_queued(task);
318 if (rpc_test_and_set_running(task)) 294 if (rpc_test_and_set_running(task))
319 return; 295 return;
@@ -326,7 +302,7 @@ static void rpc_make_runnable(struct rpc_task *task)
326 int status; 302 int status;
327 303
328 INIT_WORK(&task->u.tk_work, rpc_async_schedule); 304 INIT_WORK(&task->u.tk_work, rpc_async_schedule);
329 status = queue_work(task->tk_workqueue, &task->u.tk_work); 305 status = queue_work(rpciod_workqueue, &task->u.tk_work);
330 if (status < 0) { 306 if (status < 0) {
331 printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status); 307 printk(KERN_WARNING "RPC: failed to add task to queue: error: %d!\n", status);
332 task->tk_status = status; 308 task->tk_status = status;
@@ -343,7 +319,7 @@ static void rpc_make_runnable(struct rpc_task *task)
343 * as it's on a wait queue. 319 * as it's on a wait queue.
344 */ 320 */
345static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, 321static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
346 rpc_action action, rpc_action timer) 322 rpc_action action)
347{ 323{
348 dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n", 324 dprintk("RPC: %5u sleep_on(queue \"%s\" time %lu)\n",
349 task->tk_pid, rpc_qname(q), jiffies); 325 task->tk_pid, rpc_qname(q), jiffies);
@@ -357,11 +333,11 @@ static void __rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
357 333
358 BUG_ON(task->tk_callback != NULL); 334 BUG_ON(task->tk_callback != NULL);
359 task->tk_callback = action; 335 task->tk_callback = action;
360 __rpc_add_timer(task, timer); 336 __rpc_add_timer(q, task);
361} 337}
362 338
363void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task, 339void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
364 rpc_action action, rpc_action timer) 340 rpc_action action)
365{ 341{
366 /* Mark the task as being activated if so needed */ 342 /* Mark the task as being activated if so needed */
367 rpc_set_active(task); 343 rpc_set_active(task);
@@ -370,18 +346,19 @@ void rpc_sleep_on(struct rpc_wait_queue *q, struct rpc_task *task,
370 * Protect the queue operations. 346 * Protect the queue operations.
371 */ 347 */
372 spin_lock_bh(&q->lock); 348 spin_lock_bh(&q->lock);
373 __rpc_sleep_on(q, task, action, timer); 349 __rpc_sleep_on(q, task, action);
374 spin_unlock_bh(&q->lock); 350 spin_unlock_bh(&q->lock);
375} 351}
376EXPORT_SYMBOL_GPL(rpc_sleep_on); 352EXPORT_SYMBOL_GPL(rpc_sleep_on);
377 353
378/** 354/**
379 * __rpc_do_wake_up_task - wake up a single rpc_task 355 * __rpc_do_wake_up_task - wake up a single rpc_task
356 * @queue: wait queue
380 * @task: task to be woken up 357 * @task: task to be woken up
381 * 358 *
382 * Caller must hold queue->lock, and have cleared the task queued flag. 359 * Caller must hold queue->lock, and have cleared the task queued flag.
383 */ 360 */
384static void __rpc_do_wake_up_task(struct rpc_task *task) 361static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task *task)
385{ 362{
386 dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n", 363 dprintk("RPC: %5u __rpc_wake_up_task (now %lu)\n",
387 task->tk_pid, jiffies); 364 task->tk_pid, jiffies);
@@ -395,8 +372,7 @@ static void __rpc_do_wake_up_task(struct rpc_task *task)
395 return; 372 return;
396 } 373 }
397 374
398 __rpc_disable_timer(task); 375 __rpc_remove_wait_queue(queue, task);
399 __rpc_remove_wait_queue(task);
400 376
401 rpc_make_runnable(task); 377 rpc_make_runnable(task);
402 378
@@ -404,48 +380,32 @@ static void __rpc_do_wake_up_task(struct rpc_task *task)
404} 380}
405 381
406/* 382/*
407 * Wake up the specified task 383 * Wake up a queued task while the queue lock is being held
408 */ 384 */
409static void __rpc_wake_up_task(struct rpc_task *task) 385static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)
410{ 386{
411 if (rpc_start_wakeup(task)) { 387 if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue)
412 if (RPC_IS_QUEUED(task)) 388 __rpc_do_wake_up_task(queue, task);
413 __rpc_do_wake_up_task(task);
414 rpc_finish_wakeup(task);
415 }
416} 389}
417 390
418/* 391/*
419 * Default timeout handler if none specified by user 392 * Wake up a task on a specific queue
420 */ 393 */
421static void 394void rpc_wake_up_queued_task(struct rpc_wait_queue *queue, struct rpc_task *task)
422__rpc_default_timer(struct rpc_task *task)
423{ 395{
424 dprintk("RPC: %5u timeout (default timer)\n", task->tk_pid); 396 spin_lock_bh(&queue->lock);
425 task->tk_status = -ETIMEDOUT; 397 rpc_wake_up_task_queue_locked(queue, task);
426 rpc_wake_up_task(task); 398 spin_unlock_bh(&queue->lock);
427} 399}
400EXPORT_SYMBOL_GPL(rpc_wake_up_queued_task);
428 401
429/* 402/*
430 * Wake up the specified task 403 * Wake up the specified task
431 */ 404 */
432void rpc_wake_up_task(struct rpc_task *task) 405static void rpc_wake_up_task(struct rpc_task *task)
433{ 406{
434 rcu_read_lock_bh(); 407 rpc_wake_up_queued_task(task->tk_waitqueue, task);
435 if (rpc_start_wakeup(task)) {
436 if (RPC_IS_QUEUED(task)) {
437 struct rpc_wait_queue *queue = task->u.tk_wait.rpc_waitq;
438
439 /* Note: we're already in a bh-safe context */
440 spin_lock(&queue->lock);
441 __rpc_do_wake_up_task(task);
442 spin_unlock(&queue->lock);
443 }
444 rpc_finish_wakeup(task);
445 }
446 rcu_read_unlock_bh();
447} 408}
448EXPORT_SYMBOL_GPL(rpc_wake_up_task);
449 409
450/* 410/*
451 * Wake up the next task on a priority queue. 411 * Wake up the next task on a priority queue.
@@ -495,7 +455,7 @@ new_queue:
495new_owner: 455new_owner:
496 rpc_set_waitqueue_owner(queue, task->tk_owner); 456 rpc_set_waitqueue_owner(queue, task->tk_owner);
497out: 457out:
498 __rpc_wake_up_task(task); 458 rpc_wake_up_task_queue_locked(queue, task);
499 return task; 459 return task;
500} 460}
501 461
@@ -508,16 +468,14 @@ struct rpc_task * rpc_wake_up_next(struct rpc_wait_queue *queue)
508 468
509 dprintk("RPC: wake_up_next(%p \"%s\")\n", 469 dprintk("RPC: wake_up_next(%p \"%s\")\n",
510 queue, rpc_qname(queue)); 470 queue, rpc_qname(queue));
511 rcu_read_lock_bh(); 471 spin_lock_bh(&queue->lock);
512 spin_lock(&queue->lock);
513 if (RPC_IS_PRIORITY(queue)) 472 if (RPC_IS_PRIORITY(queue))
514 task = __rpc_wake_up_next_priority(queue); 473 task = __rpc_wake_up_next_priority(queue);
515 else { 474 else {
516 task_for_first(task, &queue->tasks[0]) 475 task_for_first(task, &queue->tasks[0])
517 __rpc_wake_up_task(task); 476 rpc_wake_up_task_queue_locked(queue, task);
518 } 477 }
519 spin_unlock(&queue->lock); 478 spin_unlock_bh(&queue->lock);
520 rcu_read_unlock_bh();
521 479
522 return task; 480 return task;
523} 481}
@@ -534,18 +492,16 @@ void rpc_wake_up(struct rpc_wait_queue *queue)
534 struct rpc_task *task, *next; 492 struct rpc_task *task, *next;
535 struct list_head *head; 493 struct list_head *head;
536 494
537 rcu_read_lock_bh(); 495 spin_lock_bh(&queue->lock);
538 spin_lock(&queue->lock);
539 head = &queue->tasks[queue->maxpriority]; 496 head = &queue->tasks[queue->maxpriority];
540 for (;;) { 497 for (;;) {
541 list_for_each_entry_safe(task, next, head, u.tk_wait.list) 498 list_for_each_entry_safe(task, next, head, u.tk_wait.list)
542 __rpc_wake_up_task(task); 499 rpc_wake_up_task_queue_locked(queue, task);
543 if (head == &queue->tasks[0]) 500 if (head == &queue->tasks[0])
544 break; 501 break;
545 head--; 502 head--;
546 } 503 }
547 spin_unlock(&queue->lock); 504 spin_unlock_bh(&queue->lock);
548 rcu_read_unlock_bh();
549} 505}
550EXPORT_SYMBOL_GPL(rpc_wake_up); 506EXPORT_SYMBOL_GPL(rpc_wake_up);
551 507
@@ -561,26 +517,48 @@ void rpc_wake_up_status(struct rpc_wait_queue *queue, int status)
561 struct rpc_task *task, *next; 517 struct rpc_task *task, *next;
562 struct list_head *head; 518 struct list_head *head;
563 519
564 rcu_read_lock_bh(); 520 spin_lock_bh(&queue->lock);
565 spin_lock(&queue->lock);
566 head = &queue->tasks[queue->maxpriority]; 521 head = &queue->tasks[queue->maxpriority];
567 for (;;) { 522 for (;;) {
568 list_for_each_entry_safe(task, next, head, u.tk_wait.list) { 523 list_for_each_entry_safe(task, next, head, u.tk_wait.list) {
569 task->tk_status = status; 524 task->tk_status = status;
570 __rpc_wake_up_task(task); 525 rpc_wake_up_task_queue_locked(queue, task);
571 } 526 }
572 if (head == &queue->tasks[0]) 527 if (head == &queue->tasks[0])
573 break; 528 break;
574 head--; 529 head--;
575 } 530 }
576 spin_unlock(&queue->lock); 531 spin_unlock_bh(&queue->lock);
577 rcu_read_unlock_bh();
578} 532}
579EXPORT_SYMBOL_GPL(rpc_wake_up_status); 533EXPORT_SYMBOL_GPL(rpc_wake_up_status);
580 534
535static void __rpc_queue_timer_fn(unsigned long ptr)
536{
537 struct rpc_wait_queue *queue = (struct rpc_wait_queue *)ptr;
538 struct rpc_task *task, *n;
539 unsigned long expires, now, timeo;
540
541 spin_lock(&queue->lock);
542 expires = now = jiffies;
543 list_for_each_entry_safe(task, n, &queue->timer_list.list, u.tk_wait.timer_list) {
544 timeo = task->u.tk_wait.expires;
545 if (time_after_eq(now, timeo)) {
546 dprintk("RPC: %5u timeout\n", task->tk_pid);
547 task->tk_status = -ETIMEDOUT;
548 rpc_wake_up_task_queue_locked(queue, task);
549 continue;
550 }
551 if (expires == now || time_after(expires, timeo))
552 expires = timeo;
553 }
554 if (!list_empty(&queue->timer_list.list))
555 rpc_set_queue_timer(queue, expires);
556 spin_unlock(&queue->lock);
557}
558
581static void __rpc_atrun(struct rpc_task *task) 559static void __rpc_atrun(struct rpc_task *task)
582{ 560{
583 rpc_wake_up_task(task); 561 task->tk_status = 0;
584} 562}
585 563
586/* 564/*
@@ -589,7 +567,7 @@ static void __rpc_atrun(struct rpc_task *task)
589void rpc_delay(struct rpc_task *task, unsigned long delay) 567void rpc_delay(struct rpc_task *task, unsigned long delay)
590{ 568{
591 task->tk_timeout = delay; 569 task->tk_timeout = delay;
592 rpc_sleep_on(&delay_queue, task, NULL, __rpc_atrun); 570 rpc_sleep_on(&delay_queue, task, __rpc_atrun);
593} 571}
594EXPORT_SYMBOL_GPL(rpc_delay); 572EXPORT_SYMBOL_GPL(rpc_delay);
595 573
@@ -644,10 +622,6 @@ static void __rpc_execute(struct rpc_task *task)
644 BUG_ON(RPC_IS_QUEUED(task)); 622 BUG_ON(RPC_IS_QUEUED(task));
645 623
646 for (;;) { 624 for (;;) {
647 /*
648 * Garbage collection of pending timers...
649 */
650 rpc_delete_timer(task);
651 625
652 /* 626 /*
653 * Execute any pending callback. 627 * Execute any pending callback.
@@ -816,8 +790,6 @@ EXPORT_SYMBOL_GPL(rpc_free);
816static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data) 790static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *task_setup_data)
817{ 791{
818 memset(task, 0, sizeof(*task)); 792 memset(task, 0, sizeof(*task));
819 setup_timer(&task->tk_timer, (void (*)(unsigned long))rpc_run_timer,
820 (unsigned long)task);
821 atomic_set(&task->tk_count, 1); 793 atomic_set(&task->tk_count, 1);
822 task->tk_flags = task_setup_data->flags; 794 task->tk_flags = task_setup_data->flags;
823 task->tk_ops = task_setup_data->callback_ops; 795 task->tk_ops = task_setup_data->callback_ops;
@@ -832,7 +804,7 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
832 task->tk_owner = current->tgid; 804 task->tk_owner = current->tgid;
833 805
834 /* Initialize workqueue for async tasks */ 806 /* Initialize workqueue for async tasks */
835 task->tk_workqueue = rpciod_workqueue; 807 task->tk_workqueue = task_setup_data->workqueue;
836 808
837 task->tk_client = task_setup_data->rpc_client; 809 task->tk_client = task_setup_data->rpc_client;
838 if (task->tk_client != NULL) { 810 if (task->tk_client != NULL) {
@@ -845,12 +817,11 @@ static void rpc_init_task(struct rpc_task *task, const struct rpc_task_setup *ta
845 task->tk_action = rpc_prepare_task; 817 task->tk_action = rpc_prepare_task;
846 818
847 if (task_setup_data->rpc_message != NULL) { 819 if (task_setup_data->rpc_message != NULL) {
848 memcpy(&task->tk_msg, task_setup_data->rpc_message, sizeof(task->tk_msg)); 820 task->tk_msg.rpc_proc = task_setup_data->rpc_message->rpc_proc;
821 task->tk_msg.rpc_argp = task_setup_data->rpc_message->rpc_argp;
822 task->tk_msg.rpc_resp = task_setup_data->rpc_message->rpc_resp;
849 /* Bind the user cred */ 823 /* Bind the user cred */
850 if (task->tk_msg.rpc_cred != NULL) 824 rpcauth_bindcred(task, task_setup_data->rpc_message->rpc_cred, task_setup_data->flags);
851 rpcauth_holdcred(task);
852 else
853 rpcauth_bindcred(task);
854 if (task->tk_action == NULL) 825 if (task->tk_action == NULL)
855 rpc_call_start(task); 826 rpc_call_start(task);
856 } 827 }
@@ -868,13 +839,6 @@ rpc_alloc_task(void)
868 return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS); 839 return (struct rpc_task *)mempool_alloc(rpc_task_mempool, GFP_NOFS);
869} 840}
870 841
871static void rpc_free_task(struct rcu_head *rcu)
872{
873 struct rpc_task *task = container_of(rcu, struct rpc_task, u.tk_rcu);
874 dprintk("RPC: %5u freeing task\n", task->tk_pid);
875 mempool_free(task, rpc_task_mempool);
876}
877
878/* 842/*
879 * Create a new task for the specified client. 843 * Create a new task for the specified client.
880 */ 844 */
@@ -898,12 +862,25 @@ out:
898 return task; 862 return task;
899} 863}
900 864
901 865static void rpc_free_task(struct rpc_task *task)
902void rpc_put_task(struct rpc_task *task)
903{ 866{
904 const struct rpc_call_ops *tk_ops = task->tk_ops; 867 const struct rpc_call_ops *tk_ops = task->tk_ops;
905 void *calldata = task->tk_calldata; 868 void *calldata = task->tk_calldata;
906 869
870 if (task->tk_flags & RPC_TASK_DYNAMIC) {
871 dprintk("RPC: %5u freeing task\n", task->tk_pid);
872 mempool_free(task, rpc_task_mempool);
873 }
874 rpc_release_calldata(tk_ops, calldata);
875}
876
877static void rpc_async_release(struct work_struct *work)
878{
879 rpc_free_task(container_of(work, struct rpc_task, u.tk_work));
880}
881
882void rpc_put_task(struct rpc_task *task)
883{
907 if (!atomic_dec_and_test(&task->tk_count)) 884 if (!atomic_dec_and_test(&task->tk_count))
908 return; 885 return;
909 /* Release resources */ 886 /* Release resources */
@@ -915,9 +892,11 @@ void rpc_put_task(struct rpc_task *task)
915 rpc_release_client(task->tk_client); 892 rpc_release_client(task->tk_client);
916 task->tk_client = NULL; 893 task->tk_client = NULL;
917 } 894 }
918 if (task->tk_flags & RPC_TASK_DYNAMIC) 895 if (task->tk_workqueue != NULL) {
919 call_rcu_bh(&task->u.tk_rcu, rpc_free_task); 896 INIT_WORK(&task->u.tk_work, rpc_async_release);
920 rpc_release_calldata(tk_ops, calldata); 897 queue_work(task->tk_workqueue, &task->u.tk_work);
898 } else
899 rpc_free_task(task);
921} 900}
922EXPORT_SYMBOL_GPL(rpc_put_task); 901EXPORT_SYMBOL_GPL(rpc_put_task);
923 902
@@ -937,9 +916,6 @@ static void rpc_release_task(struct rpc_task *task)
937 } 916 }
938 BUG_ON (RPC_IS_QUEUED(task)); 917 BUG_ON (RPC_IS_QUEUED(task));
939 918
940 /* Synchronously delete any running timer */
941 rpc_delete_timer(task);
942
943#ifdef RPC_DEBUG 919#ifdef RPC_DEBUG
944 task->tk_magic = 0; 920 task->tk_magic = 0;
945#endif 921#endif
@@ -1029,11 +1005,20 @@ rpc_destroy_mempool(void)
1029 kmem_cache_destroy(rpc_task_slabp); 1005 kmem_cache_destroy(rpc_task_slabp);
1030 if (rpc_buffer_slabp) 1006 if (rpc_buffer_slabp)
1031 kmem_cache_destroy(rpc_buffer_slabp); 1007 kmem_cache_destroy(rpc_buffer_slabp);
1008 rpc_destroy_wait_queue(&delay_queue);
1032} 1009}
1033 1010
1034int 1011int
1035rpc_init_mempool(void) 1012rpc_init_mempool(void)
1036{ 1013{
1014 /*
1015 * The following is not strictly a mempool initialisation,
1016 * but there is no harm in doing it here
1017 */
1018 rpc_init_wait_queue(&delay_queue, "delayq");
1019 if (!rpciod_start())
1020 goto err_nomem;
1021
1037 rpc_task_slabp = kmem_cache_create("rpc_tasks", 1022 rpc_task_slabp = kmem_cache_create("rpc_tasks",
1038 sizeof(struct rpc_task), 1023 sizeof(struct rpc_task),
1039 0, SLAB_HWCACHE_ALIGN, 1024 0, SLAB_HWCACHE_ALIGN,
@@ -1054,13 +1039,6 @@ rpc_init_mempool(void)
1054 rpc_buffer_slabp); 1039 rpc_buffer_slabp);
1055 if (!rpc_buffer_mempool) 1040 if (!rpc_buffer_mempool)
1056 goto err_nomem; 1041 goto err_nomem;
1057 if (!rpciod_start())
1058 goto err_nomem;
1059 /*
1060 * The following is not strictly a mempool initialisation,
1061 * but there is no harm in doing it here
1062 */
1063 rpc_init_wait_queue(&delay_queue, "delayq");
1064 return 0; 1042 return 0;
1065err_nomem: 1043err_nomem:
1066 rpc_destroy_mempool(); 1044 rpc_destroy_mempool();
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 090af78d68b5..d74c2d269539 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -510,8 +510,7 @@ EXPORT_SYMBOL(svc_destroy);
510static int 510static int
511svc_init_buffer(struct svc_rqst *rqstp, unsigned int size) 511svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
512{ 512{
513 int pages; 513 unsigned int pages, arghi;
514 int arghi;
515 514
516 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. 515 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply.
517 * We assume each is at most one page 516 * We assume each is at most one page
@@ -525,7 +524,7 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
525 rqstp->rq_pages[arghi++] = p; 524 rqstp->rq_pages[arghi++] = p;
526 pages--; 525 pages--;
527 } 526 }
528 return ! pages; 527 return pages == 0;
529} 528}
530 529
531/* 530/*
@@ -534,8 +533,9 @@ svc_init_buffer(struct svc_rqst *rqstp, unsigned int size)
534static void 533static void
535svc_release_buffer(struct svc_rqst *rqstp) 534svc_release_buffer(struct svc_rqst *rqstp)
536{ 535{
537 int i; 536 unsigned int i;
538 for (i=0; i<ARRAY_SIZE(rqstp->rq_pages); i++) 537
538 for (i = 0; i < ARRAY_SIZE(rqstp->rq_pages); i++)
539 if (rqstp->rq_pages[i]) 539 if (rqstp->rq_pages[i])
540 put_page(rqstp->rq_pages[i]); 540 put_page(rqstp->rq_pages[i]);
541} 541}
@@ -590,7 +590,7 @@ __svc_create_thread(svc_thread_fn func, struct svc_serv *serv,
590 struct svc_rqst *rqstp; 590 struct svc_rqst *rqstp;
591 int error = -ENOMEM; 591 int error = -ENOMEM;
592 int have_oldmask = 0; 592 int have_oldmask = 0;
593 cpumask_t oldmask; 593 cpumask_t uninitialized_var(oldmask);
594 594
595 rqstp = svc_prepare_thread(serv, pool); 595 rqstp = svc_prepare_thread(serv, pool);
596 if (IS_ERR(rqstp)) { 596 if (IS_ERR(rqstp)) {
@@ -619,16 +619,6 @@ out_thread:
619} 619}
620 620
621/* 621/*
622 * Create a thread in the default pool. Caller must hold BKL.
623 */
624int
625svc_create_thread(svc_thread_fn func, struct svc_serv *serv)
626{
627 return __svc_create_thread(func, serv, &serv->sv_pools[0]);
628}
629EXPORT_SYMBOL(svc_create_thread);
630
631/*
632 * Choose a pool in which to create a new thread, for svc_set_num_threads 622 * Choose a pool in which to create a new thread, for svc_set_num_threads
633 */ 623 */
634static inline struct svc_pool * 624static inline struct svc_pool *
@@ -921,8 +911,7 @@ svc_process(struct svc_rqst *rqstp)
921 case SVC_OK: 911 case SVC_OK:
922 break; 912 break;
923 case SVC_GARBAGE: 913 case SVC_GARBAGE:
924 rpc_stat = rpc_garbage_args; 914 goto err_garbage;
925 goto err_bad;
926 case SVC_SYSERR: 915 case SVC_SYSERR:
927 rpc_stat = rpc_system_err; 916 rpc_stat = rpc_system_err;
928 goto err_bad; 917 goto err_bad;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 332eb47539e1..d8e8d79a8451 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -18,6 +18,7 @@
18#include <linux/skbuff.h> 18#include <linux/skbuff.h>
19#include <linux/file.h> 19#include <linux/file.h>
20#include <linux/freezer.h> 20#include <linux/freezer.h>
21#include <linux/kthread.h>
21#include <net/sock.h> 22#include <net/sock.h>
22#include <net/checksum.h> 23#include <net/checksum.h>
23#include <net/ip.h> 24#include <net/ip.h>
@@ -586,8 +587,12 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
586 while (rqstp->rq_pages[i] == NULL) { 587 while (rqstp->rq_pages[i] == NULL) {
587 struct page *p = alloc_page(GFP_KERNEL); 588 struct page *p = alloc_page(GFP_KERNEL);
588 if (!p) { 589 if (!p) {
589 int j = msecs_to_jiffies(500); 590 set_current_state(TASK_INTERRUPTIBLE);
590 schedule_timeout_uninterruptible(j); 591 if (signalled() || kthread_should_stop()) {
592 set_current_state(TASK_RUNNING);
593 return -EINTR;
594 }
595 schedule_timeout(msecs_to_jiffies(500));
591 } 596 }
592 rqstp->rq_pages[i] = p; 597 rqstp->rq_pages[i] = p;
593 } 598 }
@@ -607,7 +612,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
607 612
608 try_to_freeze(); 613 try_to_freeze();
609 cond_resched(); 614 cond_resched();
610 if (signalled()) 615 if (signalled() || kthread_should_stop())
611 return -EINTR; 616 return -EINTR;
612 617
613 spin_lock_bh(&pool->sp_lock); 618 spin_lock_bh(&pool->sp_lock);
@@ -626,6 +631,20 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
626 * to bring down the daemons ... 631 * to bring down the daemons ...
627 */ 632 */
628 set_current_state(TASK_INTERRUPTIBLE); 633 set_current_state(TASK_INTERRUPTIBLE);
634
635 /*
636 * checking kthread_should_stop() here allows us to avoid
637 * locking and signalling when stopping kthreads that call
638 * svc_recv. If the thread has already been woken up, then
639 * we can exit here without sleeping. If not, then it
640 * will be woken up quickly during the schedule_timeout
641 */
642 if (kthread_should_stop()) {
643 set_current_state(TASK_RUNNING);
644 spin_unlock_bh(&pool->sp_lock);
645 return -EINTR;
646 }
647
629 add_wait_queue(&rqstp->rq_wait, &wait); 648 add_wait_queue(&rqstp->rq_wait, &wait);
630 spin_unlock_bh(&pool->sp_lock); 649 spin_unlock_bh(&pool->sp_lock);
631 650
@@ -641,7 +660,10 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
641 svc_thread_dequeue(pool, rqstp); 660 svc_thread_dequeue(pool, rqstp);
642 spin_unlock_bh(&pool->sp_lock); 661 spin_unlock_bh(&pool->sp_lock);
643 dprintk("svc: server %p, no data yet\n", rqstp); 662 dprintk("svc: server %p, no data yet\n", rqstp);
644 return signalled()? -EINTR : -EAGAIN; 663 if (signalled() || kthread_should_stop())
664 return -EINTR;
665 else
666 return -EAGAIN;
645 } 667 }
646 } 668 }
647 spin_unlock_bh(&pool->sp_lock); 669 spin_unlock_bh(&pool->sp_lock);
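The svc_recv() hunks above boil down to one rule: the stop condition (kthread_should_stop()) is re-tested under pool->sp_lock before the thread puts itself to sleep, so a stop request issued just before the sleep cannot be missed. Below is a minimal userspace analogue of that pattern, assuming nothing from the sunrpc code itself; the names (worker, pool_lock, should_stop) are invented for the illustration and pthread condition variables stand in for the kernel wait queue.

/*
 * Userspace sketch of the "check the stop flag under the lock before
 * sleeping" pattern that svc_recv() gains above.  Illustrative only.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  pool_wait = PTHREAD_COND_INITIALIZER;
static bool should_stop;

static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&pool_lock);
	while (!should_stop) {
		/* analogue of the new kthread_should_stop() test: re-checked
		 * under pool_lock before every sleep, so no wakeup is lost */
		pthread_cond_wait(&pool_wait, &pool_lock);
	}
	pthread_mutex_unlock(&pool_lock);
	printf("worker: stop requested, exiting\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, NULL);
	sleep(1);

	pthread_mutex_lock(&pool_lock);	/* analogue of spin_lock_bh(&pool->sp_lock) */
	should_stop = true;		/* analogue of kthread_stop() being called */
	pthread_cond_signal(&pool_wait);
	pthread_mutex_unlock(&pool_lock);

	pthread_join(t, NULL);
	return 0;
}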
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 3c64051e4555..3f30ee6006ae 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -11,7 +11,8 @@
11#include <linux/hash.h> 11#include <linux/hash.h>
12#include <linux/string.h> 12#include <linux/string.h>
13#include <net/sock.h> 13#include <net/sock.h>
14 14#include <net/ipv6.h>
15#include <linux/kernel.h>
15#define RPCDBG_FACILITY RPCDBG_AUTH 16#define RPCDBG_FACILITY RPCDBG_AUTH
16 17
17 18
@@ -85,7 +86,7 @@ static void svcauth_unix_domain_release(struct auth_domain *dom)
85struct ip_map { 86struct ip_map {
86 struct cache_head h; 87 struct cache_head h;
87 char m_class[8]; /* e.g. "nfsd" */ 88 char m_class[8]; /* e.g. "nfsd" */
88 struct in_addr m_addr; 89 struct in6_addr m_addr;
89 struct unix_domain *m_client; 90 struct unix_domain *m_client;
90 int m_add_change; 91 int m_add_change;
91}; 92};
@@ -113,12 +114,19 @@ static inline int hash_ip(__be32 ip)
113 return (hash ^ (hash>>8)) & 0xff; 114 return (hash ^ (hash>>8)) & 0xff;
114} 115}
115#endif 116#endif
117static inline int hash_ip6(struct in6_addr ip)
118{
119 return (hash_ip(ip.s6_addr32[0]) ^
120 hash_ip(ip.s6_addr32[1]) ^
121 hash_ip(ip.s6_addr32[2]) ^
122 hash_ip(ip.s6_addr32[3]));
123}
116static int ip_map_match(struct cache_head *corig, struct cache_head *cnew) 124static int ip_map_match(struct cache_head *corig, struct cache_head *cnew)
117{ 125{
118 struct ip_map *orig = container_of(corig, struct ip_map, h); 126 struct ip_map *orig = container_of(corig, struct ip_map, h);
119 struct ip_map *new = container_of(cnew, struct ip_map, h); 127 struct ip_map *new = container_of(cnew, struct ip_map, h);
120 return strcmp(orig->m_class, new->m_class) == 0 128 return strcmp(orig->m_class, new->m_class) == 0
121 && orig->m_addr.s_addr == new->m_addr.s_addr; 129 && ipv6_addr_equal(&orig->m_addr, &new->m_addr);
122} 130}
123static void ip_map_init(struct cache_head *cnew, struct cache_head *citem) 131static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
124{ 132{
@@ -126,7 +134,7 @@ static void ip_map_init(struct cache_head *cnew, struct cache_head *citem)
126 struct ip_map *item = container_of(citem, struct ip_map, h); 134 struct ip_map *item = container_of(citem, struct ip_map, h);
127 135
128 strcpy(new->m_class, item->m_class); 136 strcpy(new->m_class, item->m_class);
129 new->m_addr.s_addr = item->m_addr.s_addr; 137 ipv6_addr_copy(&new->m_addr, &item->m_addr);
130} 138}
131static void update(struct cache_head *cnew, struct cache_head *citem) 139static void update(struct cache_head *cnew, struct cache_head *citem)
132{ 140{
@@ -150,22 +158,24 @@ static void ip_map_request(struct cache_detail *cd,
150 struct cache_head *h, 158 struct cache_head *h,
151 char **bpp, int *blen) 159 char **bpp, int *blen)
152{ 160{
153 char text_addr[20]; 161 char text_addr[40];
154 struct ip_map *im = container_of(h, struct ip_map, h); 162 struct ip_map *im = container_of(h, struct ip_map, h);
155 __be32 addr = im->m_addr.s_addr;
156
157 snprintf(text_addr, 20, "%u.%u.%u.%u",
158 ntohl(addr) >> 24 & 0xff,
159 ntohl(addr) >> 16 & 0xff,
160 ntohl(addr) >> 8 & 0xff,
161 ntohl(addr) >> 0 & 0xff);
162 163
164 if (ipv6_addr_v4mapped(&(im->m_addr))) {
165 snprintf(text_addr, 20, NIPQUAD_FMT,
166 ntohl(im->m_addr.s6_addr32[3]) >> 24 & 0xff,
167 ntohl(im->m_addr.s6_addr32[3]) >> 16 & 0xff,
168 ntohl(im->m_addr.s6_addr32[3]) >> 8 & 0xff,
169 ntohl(im->m_addr.s6_addr32[3]) >> 0 & 0xff);
170 } else {
171 snprintf(text_addr, 40, NIP6_FMT, NIP6(im->m_addr));
172 }
163 qword_add(bpp, blen, im->m_class); 173 qword_add(bpp, blen, im->m_class);
164 qword_add(bpp, blen, text_addr); 174 qword_add(bpp, blen, text_addr);
165 (*bpp)[-1] = '\n'; 175 (*bpp)[-1] = '\n';
166} 176}
167 177
168static struct ip_map *ip_map_lookup(char *class, struct in_addr addr); 178static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr);
169static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry); 179static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
170 180
171static int ip_map_parse(struct cache_detail *cd, 181static int ip_map_parse(struct cache_detail *cd,
@@ -176,10 +186,10 @@ static int ip_map_parse(struct cache_detail *cd,
176 * for scratch: */ 186 * for scratch: */
177 char *buf = mesg; 187 char *buf = mesg;
178 int len; 188 int len;
179 int b1,b2,b3,b4; 189 int b1, b2, b3, b4, b5, b6, b7, b8;
180 char c; 190 char c;
181 char class[8]; 191 char class[8];
182 struct in_addr addr; 192 struct in6_addr addr;
183 int err; 193 int err;
184 194
185 struct ip_map *ipmp; 195 struct ip_map *ipmp;
@@ -198,7 +208,23 @@ static int ip_map_parse(struct cache_detail *cd,
198 len = qword_get(&mesg, buf, mlen); 208 len = qword_get(&mesg, buf, mlen);
199 if (len <= 0) return -EINVAL; 209 if (len <= 0) return -EINVAL;
200 210
201 if (sscanf(buf, "%u.%u.%u.%u%c", &b1, &b2, &b3, &b4, &c) != 4) 211 if (sscanf(buf, NIPQUAD_FMT "%c", &b1, &b2, &b3, &b4, &c) == 4) {
212 addr.s6_addr32[0] = 0;
213 addr.s6_addr32[1] = 0;
214 addr.s6_addr32[2] = htonl(0xffff);
215 addr.s6_addr32[3] =
216 htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
217 } else if (sscanf(buf, NIP6_FMT "%c",
218 &b1, &b2, &b3, &b4, &b5, &b6, &b7, &b8, &c) == 8) {
219 addr.s6_addr16[0] = htons(b1);
220 addr.s6_addr16[1] = htons(b2);
221 addr.s6_addr16[2] = htons(b3);
222 addr.s6_addr16[3] = htons(b4);
223 addr.s6_addr16[4] = htons(b5);
224 addr.s6_addr16[5] = htons(b6);
225 addr.s6_addr16[6] = htons(b7);
226 addr.s6_addr16[7] = htons(b8);
227 } else
202 return -EINVAL; 228 return -EINVAL;
203 229
204 expiry = get_expiry(&mesg); 230 expiry = get_expiry(&mesg);
@@ -216,10 +242,7 @@ static int ip_map_parse(struct cache_detail *cd,
216 } else 242 } else
217 dom = NULL; 243 dom = NULL;
218 244
219 addr.s_addr = 245 ipmp = ip_map_lookup(class, &addr);
220 htonl((((((b1<<8)|b2)<<8)|b3)<<8)|b4);
221
222 ipmp = ip_map_lookup(class,addr);
223 if (ipmp) { 246 if (ipmp) {
224 err = ip_map_update(ipmp, 247 err = ip_map_update(ipmp,
225 container_of(dom, struct unix_domain, h), 248 container_of(dom, struct unix_domain, h),
@@ -239,7 +262,7 @@ static int ip_map_show(struct seq_file *m,
239 struct cache_head *h) 262 struct cache_head *h)
240{ 263{
241 struct ip_map *im; 264 struct ip_map *im;
242 struct in_addr addr; 265 struct in6_addr addr;
243 char *dom = "-no-domain-"; 266 char *dom = "-no-domain-";
244 267
245 if (h == NULL) { 268 if (h == NULL) {
@@ -248,20 +271,24 @@ static int ip_map_show(struct seq_file *m,
248 } 271 }
249 im = container_of(h, struct ip_map, h); 272 im = container_of(h, struct ip_map, h);
250 /* class addr domain */ 273 /* class addr domain */
251 addr = im->m_addr; 274 ipv6_addr_copy(&addr, &im->m_addr);
252 275
253 if (test_bit(CACHE_VALID, &h->flags) && 276 if (test_bit(CACHE_VALID, &h->flags) &&
254 !test_bit(CACHE_NEGATIVE, &h->flags)) 277 !test_bit(CACHE_NEGATIVE, &h->flags))
255 dom = im->m_client->h.name; 278 dom = im->m_client->h.name;
256 279
257 seq_printf(m, "%s %d.%d.%d.%d %s\n", 280 if (ipv6_addr_v4mapped(&addr)) {
 258 im->m_class, 281 seq_printf(m, "%s " NIPQUAD_FMT " %s\n",
259 ntohl(addr.s_addr) >> 24 & 0xff, 282 im->m_class,
260 ntohl(addr.s_addr) >> 16 & 0xff, 283 ntohl(addr.s6_addr32[3]) >> 24 & 0xff,
261 ntohl(addr.s_addr) >> 8 & 0xff, 284 ntohl(addr.s6_addr32[3]) >> 16 & 0xff,
262 ntohl(addr.s_addr) >> 0 & 0xff, 285 ntohl(addr.s6_addr32[3]) >> 8 & 0xff,
263 dom 286 ntohl(addr.s6_addr32[3]) >> 0 & 0xff,
264 ); 287 dom);
288 } else {
 289 seq_printf(m, "%s " NIP6_FMT " %s\n",
290 im->m_class, NIP6(addr), dom);
291 }
265 return 0; 292 return 0;
266} 293}
267 294
@@ -281,16 +308,16 @@ struct cache_detail ip_map_cache = {
281 .alloc = ip_map_alloc, 308 .alloc = ip_map_alloc,
282}; 309};
283 310
284static struct ip_map *ip_map_lookup(char *class, struct in_addr addr) 311static struct ip_map *ip_map_lookup(char *class, struct in6_addr *addr)
285{ 312{
286 struct ip_map ip; 313 struct ip_map ip;
287 struct cache_head *ch; 314 struct cache_head *ch;
288 315
289 strcpy(ip.m_class, class); 316 strcpy(ip.m_class, class);
290 ip.m_addr = addr; 317 ipv6_addr_copy(&ip.m_addr, addr);
291 ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h, 318 ch = sunrpc_cache_lookup(&ip_map_cache, &ip.h,
292 hash_str(class, IP_HASHBITS) ^ 319 hash_str(class, IP_HASHBITS) ^
293 hash_ip(addr.s_addr)); 320 hash_ip6(*addr));
294 321
295 if (ch) 322 if (ch)
296 return container_of(ch, struct ip_map, h); 323 return container_of(ch, struct ip_map, h);
@@ -319,14 +346,14 @@ static int ip_map_update(struct ip_map *ipm, struct unix_domain *udom, time_t ex
319 ch = sunrpc_cache_update(&ip_map_cache, 346 ch = sunrpc_cache_update(&ip_map_cache,
320 &ip.h, &ipm->h, 347 &ip.h, &ipm->h,
321 hash_str(ipm->m_class, IP_HASHBITS) ^ 348 hash_str(ipm->m_class, IP_HASHBITS) ^
322 hash_ip(ipm->m_addr.s_addr)); 349 hash_ip6(ipm->m_addr));
323 if (!ch) 350 if (!ch)
324 return -ENOMEM; 351 return -ENOMEM;
325 cache_put(ch, &ip_map_cache); 352 cache_put(ch, &ip_map_cache);
326 return 0; 353 return 0;
327} 354}
328 355
329int auth_unix_add_addr(struct in_addr addr, struct auth_domain *dom) 356int auth_unix_add_addr(struct in6_addr *addr, struct auth_domain *dom)
330{ 357{
331 struct unix_domain *udom; 358 struct unix_domain *udom;
332 struct ip_map *ipmp; 359 struct ip_map *ipmp;
@@ -355,7 +382,7 @@ int auth_unix_forget_old(struct auth_domain *dom)
355} 382}
356EXPORT_SYMBOL(auth_unix_forget_old); 383EXPORT_SYMBOL(auth_unix_forget_old);
357 384
358struct auth_domain *auth_unix_lookup(struct in_addr addr) 385struct auth_domain *auth_unix_lookup(struct in6_addr *addr)
359{ 386{
360 struct ip_map *ipm; 387 struct ip_map *ipm;
361 struct auth_domain *rv; 388 struct auth_domain *rv;
@@ -650,9 +677,24 @@ static int unix_gid_find(uid_t uid, struct group_info **gip,
650int 677int
651svcauth_unix_set_client(struct svc_rqst *rqstp) 678svcauth_unix_set_client(struct svc_rqst *rqstp)
652{ 679{
653 struct sockaddr_in *sin = svc_addr_in(rqstp); 680 struct sockaddr_in *sin;
681 struct sockaddr_in6 *sin6, sin6_storage;
654 struct ip_map *ipm; 682 struct ip_map *ipm;
655 683
684 switch (rqstp->rq_addr.ss_family) {
685 case AF_INET:
686 sin = svc_addr_in(rqstp);
687 sin6 = &sin6_storage;
688 ipv6_addr_set(&sin6->sin6_addr, 0, 0,
689 htonl(0x0000FFFF), sin->sin_addr.s_addr);
690 break;
691 case AF_INET6:
692 sin6 = svc_addr_in6(rqstp);
693 break;
694 default:
695 BUG();
696 }
697
656 rqstp->rq_client = NULL; 698 rqstp->rq_client = NULL;
657 if (rqstp->rq_proc == 0) 699 if (rqstp->rq_proc == 0)
658 return SVC_OK; 700 return SVC_OK;
@@ -660,7 +702,7 @@ svcauth_unix_set_client(struct svc_rqst *rqstp)
660 ipm = ip_map_cached_get(rqstp); 702 ipm = ip_map_cached_get(rqstp);
661 if (ipm == NULL) 703 if (ipm == NULL)
662 ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class, 704 ipm = ip_map_lookup(rqstp->rq_server->sv_program->pg_class,
663 sin->sin_addr); 705 &sin6->sin6_addr);
664 706
665 if (ipm == NULL) 707 if (ipm == NULL)
666 return SVC_DENIED; 708 return SVC_DENIED;
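The svcauth_unix.c conversion stores every client address as an IPv6 address and folds AF_INET peers into IPv4-mapped form (::ffff:a.b.c.d), both in ip_map_parse() and in svcauth_unix_set_client(). The short, runnable sketch below shows that mapping in userspace terms; it uses the portable s6_addr byte array rather than the kernel's s6_addr32 words or ipv6_addr_set(), and the example address 192.0.2.1 is arbitrary.

/*
 * Illustration of the IPv4-mapped IPv6 form used by the ip_map cache above.
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct in_addr v4;
	struct in6_addr mapped;
	char buf[INET6_ADDRSTRLEN];

	inet_pton(AF_INET, "192.0.2.1", &v4);	/* example IPv4 client address */

	memset(&mapped, 0, sizeof(mapped));
	mapped.s6_addr[10] = 0xff;		/* ::ffff:0:0/96 mapped prefix */
	mapped.s6_addr[11] = 0xff;
	memcpy(&mapped.s6_addr[12], &v4.s_addr, 4);

	inet_ntop(AF_INET6, &mapped, buf, sizeof(buf));
	printf("%s\n", buf);			/* prints ::ffff:192.0.2.1 */
	return 0;
}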
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index c475977de05a..3e65719f1ef6 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -38,6 +38,7 @@
38#include <net/checksum.h> 38#include <net/checksum.h>
39#include <net/ip.h> 39#include <net/ip.h>
40#include <net/ipv6.h> 40#include <net/ipv6.h>
41#include <net/tcp.h>
41#include <net/tcp_states.h> 42#include <net/tcp_states.h>
42#include <asm/uaccess.h> 43#include <asm/uaccess.h>
43#include <asm/ioctls.h> 44#include <asm/ioctls.h>
@@ -45,6 +46,7 @@
45#include <linux/sunrpc/types.h> 46#include <linux/sunrpc/types.h>
46#include <linux/sunrpc/clnt.h> 47#include <linux/sunrpc/clnt.h>
47#include <linux/sunrpc/xdr.h> 48#include <linux/sunrpc/xdr.h>
49#include <linux/sunrpc/msg_prot.h>
48#include <linux/sunrpc/svcsock.h> 50#include <linux/sunrpc/svcsock.h>
49#include <linux/sunrpc/stats.h> 51#include <linux/sunrpc/stats.h>
50 52
@@ -822,8 +824,8 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
822 * the next four bytes. Otherwise try to gobble up as much as 824 * the next four bytes. Otherwise try to gobble up as much as
823 * possible up to the complete record length. 825 * possible up to the complete record length.
824 */ 826 */
825 if (svsk->sk_tcplen < 4) { 827 if (svsk->sk_tcplen < sizeof(rpc_fraghdr)) {
826 unsigned long want = 4 - svsk->sk_tcplen; 828 int want = sizeof(rpc_fraghdr) - svsk->sk_tcplen;
827 struct kvec iov; 829 struct kvec iov;
828 830
829 iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen; 831 iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
@@ -833,32 +835,31 @@ static int svc_tcp_recvfrom(struct svc_rqst *rqstp)
833 svsk->sk_tcplen += len; 835 svsk->sk_tcplen += len;
834 836
835 if (len < want) { 837 if (len < want) {
836 dprintk("svc: short recvfrom while reading record length (%d of %lu)\n", 838 dprintk("svc: short recvfrom while reading record "
837 len, want); 839 "length (%d of %d)\n", len, want);
838 svc_xprt_received(&svsk->sk_xprt); 840 svc_xprt_received(&svsk->sk_xprt);
839 return -EAGAIN; /* record header not complete */ 841 return -EAGAIN; /* record header not complete */
840 } 842 }
841 843
842 svsk->sk_reclen = ntohl(svsk->sk_reclen); 844 svsk->sk_reclen = ntohl(svsk->sk_reclen);
843 if (!(svsk->sk_reclen & 0x80000000)) { 845 if (!(svsk->sk_reclen & RPC_LAST_STREAM_FRAGMENT)) {
844 /* FIXME: technically, a record can be fragmented, 846 /* FIXME: technically, a record can be fragmented,
845 * and non-terminal fragments will not have the top 847 * and non-terminal fragments will not have the top
846 * bit set in the fragment length header. 848 * bit set in the fragment length header.
847 * But apparently no known nfs clients send fragmented 849 * But apparently no known nfs clients send fragmented
848 * records. */ 850 * records. */
849 if (net_ratelimit()) 851 if (net_ratelimit())
850 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx" 852 printk(KERN_NOTICE "RPC: multiple fragments "
851 " (non-terminal)\n", 853 "per record not supported\n");
852 (unsigned long) svsk->sk_reclen);
853 goto err_delete; 854 goto err_delete;
854 } 855 }
855 svsk->sk_reclen &= 0x7fffffff; 856 svsk->sk_reclen &= RPC_FRAGMENT_SIZE_MASK;
856 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen); 857 dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
857 if (svsk->sk_reclen > serv->sv_max_mesg) { 858 if (svsk->sk_reclen > serv->sv_max_mesg) {
858 if (net_ratelimit()) 859 if (net_ratelimit())
859 printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx" 860 printk(KERN_NOTICE "RPC: "
860 " (large)\n", 861 "fragment too large: 0x%08lx\n",
861 (unsigned long) svsk->sk_reclen); 862 (unsigned long)svsk->sk_reclen);
862 goto err_delete; 863 goto err_delete;
863 } 864 }
864 } 865 }
@@ -1045,7 +1046,6 @@ void svc_cleanup_xprt_sock(void)
1045static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv) 1046static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
1046{ 1047{
1047 struct sock *sk = svsk->sk_sk; 1048 struct sock *sk = svsk->sk_sk;
1048 struct tcp_sock *tp = tcp_sk(sk);
1049 1049
1050 svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv); 1050 svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv);
1051 set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags); 1051 set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
@@ -1063,7 +1063,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
1063 svsk->sk_reclen = 0; 1063 svsk->sk_reclen = 0;
1064 svsk->sk_tcplen = 0; 1064 svsk->sk_tcplen = 0;
1065 1065
1066 tp->nonagle = 1; /* disable Nagle's algorithm */ 1066 tcp_sk(sk)->nonagle |= TCP_NAGLE_OFF;
1067 1067
1068 /* initialise setting must have enough space to 1068 /* initialise setting must have enough space to
1069 * receive and respond to one request. 1069 * receive and respond to one request.
@@ -1101,6 +1101,7 @@ void svc_sock_update_bufs(struct svc_serv *serv)
1101 } 1101 }
1102 spin_unlock_bh(&serv->sv_lock); 1102 spin_unlock_bh(&serv->sv_lock);
1103} 1103}
1104EXPORT_SYMBOL(svc_sock_update_bufs);
1104 1105
1105/* 1106/*
1106 * Initialize socket for RPC use and create svc_sock struct 1107 * Initialize socket for RPC use and create svc_sock struct
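The svc_tcp_recvfrom() changes replace the open-coded 0x80000000/0x7fffffff constants with the sunrpc RPC_LAST_STREAM_FRAGMENT and RPC_FRAGMENT_SIZE_MASK names: the 4-byte record marker that precedes each RPC-over-TCP message carries a last-fragment flag in its top bit and the fragment length in the low 31 bits (RFC 1831/5531). The sketch below decodes such a marker in userspace; LAST_FRAGMENT and SIZE_MASK are local stand-ins assumed to equal the msg_prot.h constants.

/*
 * Decoding an RPC-over-TCP record marker, as svc_tcp_recvfrom() does above.
 */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

#define LAST_FRAGMENT	0x80000000u	/* assumed == RPC_LAST_STREAM_FRAGMENT */
#define SIZE_MASK	0x7fffffffu	/* assumed == RPC_FRAGMENT_SIZE_MASK   */

int main(void)
{
	uint32_t wire = htonl(LAST_FRAGMENT | 1234);	/* marker as seen on the wire */
	uint32_t marker = ntohl(wire);

	printf("last fragment: %s, length: %u bytes\n",
	       (marker & LAST_FRAGMENT) ? "yes" : "no",
	       marker & SIZE_MASK);
	return 0;
}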
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index d5553b8179f9..75d748eee0eb 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -188,9 +188,9 @@ out_sleep:
188 task->tk_timeout = 0; 188 task->tk_timeout = 0;
189 task->tk_status = -EAGAIN; 189 task->tk_status = -EAGAIN;
190 if (req && req->rq_ntrans) 190 if (req && req->rq_ntrans)
191 rpc_sleep_on(&xprt->resend, task, NULL, NULL); 191 rpc_sleep_on(&xprt->resend, task, NULL);
192 else 192 else
193 rpc_sleep_on(&xprt->sending, task, NULL, NULL); 193 rpc_sleep_on(&xprt->sending, task, NULL);
194 return 0; 194 return 0;
195} 195}
196EXPORT_SYMBOL_GPL(xprt_reserve_xprt); 196EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
@@ -238,9 +238,9 @@ out_sleep:
238 task->tk_timeout = 0; 238 task->tk_timeout = 0;
239 task->tk_status = -EAGAIN; 239 task->tk_status = -EAGAIN;
240 if (req && req->rq_ntrans) 240 if (req && req->rq_ntrans)
241 rpc_sleep_on(&xprt->resend, task, NULL, NULL); 241 rpc_sleep_on(&xprt->resend, task, NULL);
242 else 242 else
243 rpc_sleep_on(&xprt->sending, task, NULL, NULL); 243 rpc_sleep_on(&xprt->sending, task, NULL);
244 return 0; 244 return 0;
245} 245}
246EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong); 246EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
@@ -447,13 +447,13 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
447 * @task: task to be put to sleep 447 * @task: task to be put to sleep
448 * 448 *
449 */ 449 */
450void xprt_wait_for_buffer_space(struct rpc_task *task) 450void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
451{ 451{
452 struct rpc_rqst *req = task->tk_rqstp; 452 struct rpc_rqst *req = task->tk_rqstp;
453 struct rpc_xprt *xprt = req->rq_xprt; 453 struct rpc_xprt *xprt = req->rq_xprt;
454 454
455 task->tk_timeout = req->rq_timeout; 455 task->tk_timeout = req->rq_timeout;
456 rpc_sleep_on(&xprt->pending, task, NULL, NULL); 456 rpc_sleep_on(&xprt->pending, task, action);
457} 457}
458EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space); 458EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
459 459
@@ -472,7 +472,7 @@ void xprt_write_space(struct rpc_xprt *xprt)
472 if (xprt->snd_task) { 472 if (xprt->snd_task) {
473 dprintk("RPC: write space: waking waiting task on " 473 dprintk("RPC: write space: waking waiting task on "
474 "xprt %p\n", xprt); 474 "xprt %p\n", xprt);
475 rpc_wake_up_task(xprt->snd_task); 475 rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
476 } 476 }
477 spin_unlock_bh(&xprt->transport_lock); 477 spin_unlock_bh(&xprt->transport_lock);
478} 478}
@@ -602,11 +602,37 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
602 /* Try to schedule an autoclose RPC call */ 602 /* Try to schedule an autoclose RPC call */
603 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0) 603 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
604 queue_work(rpciod_workqueue, &xprt->task_cleanup); 604 queue_work(rpciod_workqueue, &xprt->task_cleanup);
605 else if (xprt->snd_task != NULL) 605 xprt_wake_pending_tasks(xprt, -ENOTCONN);
606 rpc_wake_up_task(xprt->snd_task); 606 spin_unlock_bh(&xprt->transport_lock);
607}
608
609/**
610 * xprt_conditional_disconnect - force a transport to disconnect
611 * @xprt: transport to disconnect
612 * @cookie: 'connection cookie'
613 *
614 * This attempts to break the connection if and only if 'cookie' matches
615 * the current transport 'connection cookie'. It ensures that we don't
616 * try to break the connection more than once when we need to retransmit
617 * a batch of RPC requests.
618 *
619 */
620void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
621{
622 /* Don't race with the test_bit() in xprt_clear_locked() */
623 spin_lock_bh(&xprt->transport_lock);
624 if (cookie != xprt->connect_cookie)
625 goto out;
626 if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
627 goto out;
628 set_bit(XPRT_CLOSE_WAIT, &xprt->state);
629 /* Try to schedule an autoclose RPC call */
630 if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
631 queue_work(rpciod_workqueue, &xprt->task_cleanup);
632 xprt_wake_pending_tasks(xprt, -ENOTCONN);
633out:
607 spin_unlock_bh(&xprt->transport_lock); 634 spin_unlock_bh(&xprt->transport_lock);
608} 635}
609EXPORT_SYMBOL_GPL(xprt_force_disconnect);
610 636
611static void 637static void
612xprt_init_autodisconnect(unsigned long data) 638xprt_init_autodisconnect(unsigned long data)
@@ -653,7 +679,7 @@ void xprt_connect(struct rpc_task *task)
653 task->tk_rqstp->rq_bytes_sent = 0; 679 task->tk_rqstp->rq_bytes_sent = 0;
654 680
655 task->tk_timeout = xprt->connect_timeout; 681 task->tk_timeout = xprt->connect_timeout;
656 rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL); 682 rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
657 xprt->stat.connect_start = jiffies; 683 xprt->stat.connect_start = jiffies;
658 xprt->ops->connect(task); 684 xprt->ops->connect(task);
659 } 685 }
@@ -749,18 +775,20 @@ EXPORT_SYMBOL_GPL(xprt_update_rtt);
749void xprt_complete_rqst(struct rpc_task *task, int copied) 775void xprt_complete_rqst(struct rpc_task *task, int copied)
750{ 776{
751 struct rpc_rqst *req = task->tk_rqstp; 777 struct rpc_rqst *req = task->tk_rqstp;
778 struct rpc_xprt *xprt = req->rq_xprt;
752 779
753 dprintk("RPC: %5u xid %08x complete (%d bytes received)\n", 780 dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
754 task->tk_pid, ntohl(req->rq_xid), copied); 781 task->tk_pid, ntohl(req->rq_xid), copied);
755 782
756 task->tk_xprt->stat.recvs++; 783 xprt->stat.recvs++;
757 task->tk_rtt = (long)jiffies - req->rq_xtime; 784 task->tk_rtt = (long)jiffies - req->rq_xtime;
758 785
759 list_del_init(&req->rq_list); 786 list_del_init(&req->rq_list);
787 req->rq_private_buf.len = copied;
760 /* Ensure all writes are done before we update req->rq_received */ 788 /* Ensure all writes are done before we update req->rq_received */
761 smp_wmb(); 789 smp_wmb();
762 req->rq_received = req->rq_private_buf.len = copied; 790 req->rq_received = copied;
763 rpc_wake_up_task(task); 791 rpc_wake_up_queued_task(&xprt->pending, task);
764} 792}
765EXPORT_SYMBOL_GPL(xprt_complete_rqst); 793EXPORT_SYMBOL_GPL(xprt_complete_rqst);
766 794
@@ -769,17 +797,17 @@ static void xprt_timer(struct rpc_task *task)
769 struct rpc_rqst *req = task->tk_rqstp; 797 struct rpc_rqst *req = task->tk_rqstp;
770 struct rpc_xprt *xprt = req->rq_xprt; 798 struct rpc_xprt *xprt = req->rq_xprt;
771 799
800 if (task->tk_status != -ETIMEDOUT)
801 return;
772 dprintk("RPC: %5u xprt_timer\n", task->tk_pid); 802 dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
773 803
774 spin_lock(&xprt->transport_lock); 804 spin_lock_bh(&xprt->transport_lock);
775 if (!req->rq_received) { 805 if (!req->rq_received) {
776 if (xprt->ops->timer) 806 if (xprt->ops->timer)
777 xprt->ops->timer(task); 807 xprt->ops->timer(task);
778 task->tk_status = -ETIMEDOUT; 808 } else
779 } 809 task->tk_status = 0;
780 task->tk_timeout = 0; 810 spin_unlock_bh(&xprt->transport_lock);
781 rpc_wake_up_task(task);
782 spin_unlock(&xprt->transport_lock);
783} 811}
784 812
785/** 813/**
@@ -849,6 +877,7 @@ void xprt_transmit(struct rpc_task *task)
849 } else if (!req->rq_bytes_sent) 877 } else if (!req->rq_bytes_sent)
850 return; 878 return;
851 879
880 req->rq_connect_cookie = xprt->connect_cookie;
852 status = xprt->ops->send_request(task); 881 status = xprt->ops->send_request(task);
853 if (status == 0) { 882 if (status == 0) {
854 dprintk("RPC: %5u xmit complete\n", task->tk_pid); 883 dprintk("RPC: %5u xmit complete\n", task->tk_pid);
@@ -864,7 +893,7 @@ void xprt_transmit(struct rpc_task *task)
864 if (!xprt_connected(xprt)) 893 if (!xprt_connected(xprt))
865 task->tk_status = -ENOTCONN; 894 task->tk_status = -ENOTCONN;
866 else if (!req->rq_received) 895 else if (!req->rq_received)
867 rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer); 896 rpc_sleep_on(&xprt->pending, task, xprt_timer);
868 spin_unlock_bh(&xprt->transport_lock); 897 spin_unlock_bh(&xprt->transport_lock);
869 return; 898 return;
870 } 899 }
@@ -875,7 +904,7 @@ void xprt_transmit(struct rpc_task *task)
875 */ 904 */
876 task->tk_status = status; 905 task->tk_status = status;
877 if (status == -ECONNREFUSED) 906 if (status == -ECONNREFUSED)
878 rpc_sleep_on(&xprt->sending, task, NULL, NULL); 907 rpc_sleep_on(&xprt->sending, task, NULL);
879} 908}
880 909
881static inline void do_xprt_reserve(struct rpc_task *task) 910static inline void do_xprt_reserve(struct rpc_task *task)
@@ -895,7 +924,7 @@ static inline void do_xprt_reserve(struct rpc_task *task)
895 dprintk("RPC: waiting for request slot\n"); 924 dprintk("RPC: waiting for request slot\n");
896 task->tk_status = -EAGAIN; 925 task->tk_status = -EAGAIN;
897 task->tk_timeout = 0; 926 task->tk_timeout = 0;
898 rpc_sleep_on(&xprt->backlog, task, NULL, NULL); 927 rpc_sleep_on(&xprt->backlog, task, NULL);
899} 928}
900 929
901/** 930/**
@@ -1052,6 +1081,11 @@ static void xprt_destroy(struct kref *kref)
1052 xprt->shutdown = 1; 1081 xprt->shutdown = 1;
1053 del_timer_sync(&xprt->timer); 1082 del_timer_sync(&xprt->timer);
1054 1083
1084 rpc_destroy_wait_queue(&xprt->binding);
1085 rpc_destroy_wait_queue(&xprt->pending);
1086 rpc_destroy_wait_queue(&xprt->sending);
1087 rpc_destroy_wait_queue(&xprt->resend);
1088 rpc_destroy_wait_queue(&xprt->backlog);
1055 /* 1089 /*
1056 * Tear down transport state and free the rpc_xprt 1090 * Tear down transport state and free the rpc_xprt
1057 */ 1091 */
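The new xprt_conditional_disconnect() relies on a "connection cookie" that changes every time the transport (re-)connects: a disconnect request stamped with a stale cookie becomes a no-op instead of tearing down a newer connection. The single-threaded sketch below illustrates only that idea; fake_xprt, fake_connect() and conditional_disconnect() are invented names and omit all of the locking and state bits the real code needs.

/*
 * Minimal illustration of the connection-cookie check behind
 * xprt_conditional_disconnect().
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_xprt {
	unsigned int connect_cookie;
	bool connected;
};

static void fake_connect(struct fake_xprt *x)
{
	x->connected = true;
	x->connect_cookie++;		/* new connection => new cookie */
}

static void conditional_disconnect(struct fake_xprt *x, unsigned int cookie)
{
	if (cookie != x->connect_cookie) {
		printf("stale cookie %u (current %u): ignoring\n",
		       cookie, x->connect_cookie);
		return;
	}
	x->connected = false;
	printf("cookie %u matched: disconnecting\n", cookie);
}

int main(void)
{
	struct fake_xprt x = { 0, false };
	unsigned int stamped;

	fake_connect(&x);
	stamped = x.connect_cookie;	/* cookie recorded at transmit time */

	fake_connect(&x);		/* connection re-established meanwhile */
	conditional_disconnect(&x, stamped);		/* no-op: cookie is stale */

	conditional_disconnect(&x, x.connect_cookie);	/* tears down current one */
	return 0;
}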
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 16fd3f6718ff..af408fc12634 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -1036,6 +1036,8 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
1036 wait_event(xprt->sc_send_wait, 1036 wait_event(xprt->sc_send_wait,
1037 atomic_read(&xprt->sc_sq_count) < 1037 atomic_read(&xprt->sc_sq_count) <
1038 xprt->sc_sq_depth); 1038 xprt->sc_sq_depth);
1039 if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
1040 return 0;
1039 continue; 1041 continue;
1040 } 1042 }
1041 /* Bumped used SQ WR count and post */ 1043 /* Bumped used SQ WR count and post */
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 613daf8c1ff7..ddbe981ab516 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -136,12 +136,6 @@ static ctl_table sunrpc_table[] = {
136#endif 136#endif
137 137
138/* 138/*
139 * How many times to try sending a request on a socket before waiting
140 * for the socket buffer to clear.
141 */
142#define XS_SENDMSG_RETRY (10U)
143
144/*
145 * Time out for an RPC UDP socket connect. UDP socket connects are 139 * Time out for an RPC UDP socket connect. UDP socket connects are
146 * synchronous, but we set a timeout anyway in case of resource 140 * synchronous, but we set a timeout anyway in case of resource
147 * exhaustion on the local host. 141 * exhaustion on the local host.
@@ -516,6 +510,14 @@ out:
516 return sent; 510 return sent;
517} 511}
518 512
513static void xs_nospace_callback(struct rpc_task *task)
514{
515 struct sock_xprt *transport = container_of(task->tk_rqstp->rq_xprt, struct sock_xprt, xprt);
516
517 transport->inet->sk_write_pending--;
518 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
519}
520
519/** 521/**
520 * xs_nospace - place task on wait queue if transmit was incomplete 522 * xs_nospace - place task on wait queue if transmit was incomplete
521 * @task: task to put to sleep 523 * @task: task to put to sleep
@@ -531,20 +533,27 @@ static void xs_nospace(struct rpc_task *task)
531 task->tk_pid, req->rq_slen - req->rq_bytes_sent, 533 task->tk_pid, req->rq_slen - req->rq_bytes_sent,
532 req->rq_slen); 534 req->rq_slen);
533 535
534 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) { 536 /* Protect against races with write_space */
535 /* Protect against races with write_space */ 537 spin_lock_bh(&xprt->transport_lock);
536 spin_lock_bh(&xprt->transport_lock); 538
537 539 /* Don't race with disconnect */
538 /* Don't race with disconnect */ 540 if (xprt_connected(xprt)) {
539 if (!xprt_connected(xprt)) 541 if (test_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags)) {
540 task->tk_status = -ENOTCONN; 542 /*
541 else if (test_bit(SOCK_NOSPACE, &transport->sock->flags)) 543 * Notify TCP that we're limited by the application
542 xprt_wait_for_buffer_space(task); 544 * window size
545 */
546 set_bit(SOCK_NOSPACE, &transport->sock->flags);
547 transport->inet->sk_write_pending++;
548 /* ...and wait for more buffer space */
549 xprt_wait_for_buffer_space(task, xs_nospace_callback);
550 }
551 } else {
552 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
553 task->tk_status = -ENOTCONN;
554 }
543 555
544 spin_unlock_bh(&xprt->transport_lock); 556 spin_unlock_bh(&xprt->transport_lock);
545 } else
546 /* Keep holding the socket if it is blocked */
547 rpc_delay(task, HZ>>4);
548} 557}
549 558
550/** 559/**
@@ -588,19 +597,20 @@ static int xs_udp_send_request(struct rpc_task *task)
588 } 597 }
589 598
590 switch (status) { 599 switch (status) {
600 case -EAGAIN:
601 xs_nospace(task);
602 break;
591 case -ENETUNREACH: 603 case -ENETUNREACH:
592 case -EPIPE: 604 case -EPIPE:
593 case -ECONNREFUSED: 605 case -ECONNREFUSED:
594 /* When the server has died, an ICMP port unreachable message 606 /* When the server has died, an ICMP port unreachable message
595 * prompts ECONNREFUSED. */ 607 * prompts ECONNREFUSED. */
596 break; 608 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
597 case -EAGAIN:
598 xs_nospace(task);
599 break; 609 break;
600 default: 610 default:
611 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
601 dprintk("RPC: sendmsg returned unrecognized error %d\n", 612 dprintk("RPC: sendmsg returned unrecognized error %d\n",
602 -status); 613 -status);
603 break;
604 } 614 }
605 615
606 return status; 616 return status;
@@ -650,7 +660,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
650 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); 660 struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
651 struct xdr_buf *xdr = &req->rq_snd_buf; 661 struct xdr_buf *xdr = &req->rq_snd_buf;
652 int status; 662 int status;
653 unsigned int retry = 0;
654 663
655 xs_encode_tcp_record_marker(&req->rq_snd_buf); 664 xs_encode_tcp_record_marker(&req->rq_snd_buf);
656 665
@@ -681,9 +690,10 @@ static int xs_tcp_send_request(struct rpc_task *task)
681 return 0; 690 return 0;
682 } 691 }
683 692
693 if (status != 0)
694 continue;
684 status = -EAGAIN; 695 status = -EAGAIN;
685 if (retry++ > XS_SENDMSG_RETRY) 696 break;
686 break;
687 } 697 }
688 698
689 switch (status) { 699 switch (status) {
@@ -695,12 +705,13 @@ static int xs_tcp_send_request(struct rpc_task *task)
695 case -ENOTCONN: 705 case -ENOTCONN:
696 case -EPIPE: 706 case -EPIPE:
697 status = -ENOTCONN; 707 status = -ENOTCONN;
708 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
698 break; 709 break;
699 default: 710 default:
700 dprintk("RPC: sendmsg returned unrecognized error %d\n", 711 dprintk("RPC: sendmsg returned unrecognized error %d\n",
701 -status); 712 -status);
713 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
702 xs_tcp_shutdown(xprt); 714 xs_tcp_shutdown(xprt);
703 break;
704 } 715 }
705 716
706 return status; 717 return status;
@@ -1073,6 +1084,7 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
1073{ 1084{
1074 struct rpc_xprt *xprt; 1085 struct rpc_xprt *xprt;
1075 read_descriptor_t rd_desc; 1086 read_descriptor_t rd_desc;
1087 int read;
1076 1088
1077 dprintk("RPC: xs_tcp_data_ready...\n"); 1089 dprintk("RPC: xs_tcp_data_ready...\n");
1078 1090
@@ -1084,8 +1096,10 @@ static void xs_tcp_data_ready(struct sock *sk, int bytes)
1084 1096
1085 /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */ 1097 /* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
1086 rd_desc.arg.data = xprt; 1098 rd_desc.arg.data = xprt;
1087 rd_desc.count = 65536; 1099 do {
1088 tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv); 1100 rd_desc.count = 65536;
1101 read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
1102 } while (read > 0);
1089out: 1103out:
1090 read_unlock(&sk->sk_callback_lock); 1104 read_unlock(&sk->sk_callback_lock);
1091} 1105}
@@ -1128,6 +1142,7 @@ static void xs_tcp_state_change(struct sock *sk)
1128 break; 1142 break;
1129 case TCP_FIN_WAIT1: 1143 case TCP_FIN_WAIT1:
1130 /* The client initiated a shutdown of the socket */ 1144 /* The client initiated a shutdown of the socket */
1145 xprt->connect_cookie++;
1131 xprt->reestablish_timeout = 0; 1146 xprt->reestablish_timeout = 0;
1132 set_bit(XPRT_CLOSING, &xprt->state); 1147 set_bit(XPRT_CLOSING, &xprt->state);
1133 smp_mb__before_clear_bit(); 1148 smp_mb__before_clear_bit();
@@ -1140,6 +1155,7 @@ static void xs_tcp_state_change(struct sock *sk)
1140 set_bit(XPRT_CLOSING, &xprt->state); 1155 set_bit(XPRT_CLOSING, &xprt->state);
1141 xprt_force_disconnect(xprt); 1156 xprt_force_disconnect(xprt);
1142 case TCP_SYN_SENT: 1157 case TCP_SYN_SENT:
1158 xprt->connect_cookie++;
1143 case TCP_CLOSING: 1159 case TCP_CLOSING:
1144 /* 1160 /*
1145 * If the server closed down the connection, make sure that 1161 * If the server closed down the connection, make sure that
@@ -1186,9 +1202,11 @@ static void xs_udp_write_space(struct sock *sk)
1186 1202
1187 if (unlikely(!(sock = sk->sk_socket))) 1203 if (unlikely(!(sock = sk->sk_socket)))
1188 goto out; 1204 goto out;
1205 clear_bit(SOCK_NOSPACE, &sock->flags);
1206
1189 if (unlikely(!(xprt = xprt_from_sock(sk)))) 1207 if (unlikely(!(xprt = xprt_from_sock(sk))))
1190 goto out; 1208 goto out;
1191 if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))) 1209 if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
1192 goto out; 1210 goto out;
1193 1211
1194 xprt_write_space(xprt); 1212 xprt_write_space(xprt);
@@ -1219,9 +1237,11 @@ static void xs_tcp_write_space(struct sock *sk)
1219 1237
1220 if (unlikely(!(sock = sk->sk_socket))) 1238 if (unlikely(!(sock = sk->sk_socket)))
1221 goto out; 1239 goto out;
1240 clear_bit(SOCK_NOSPACE, &sock->flags);
1241
1222 if (unlikely(!(xprt = xprt_from_sock(sk)))) 1242 if (unlikely(!(xprt = xprt_from_sock(sk))))
1223 goto out; 1243 goto out;
1224 if (unlikely(!test_and_clear_bit(SOCK_NOSPACE, &sock->flags))) 1244 if (test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sock->flags) == 0)
1225 goto out; 1245 goto out;
1226 1246
1227 xprt_write_space(xprt); 1247 xprt_write_space(xprt);
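The xs_nospace()/write_space rework above makes a sender that hits a full socket buffer park itself until the socket reports writable space again (via the new xs_nospace_callback), rather than retrying on a fixed delay. The userspace sketch below shows the same wait-for-write-space handshake with a non-blocking socketpair and poll(); it is an analogue of the mechanism, not the sunrpc code, and the buffer sizes are arbitrary.

/*
 * Non-blocking send hits EAGAIN, then waits for POLLOUT before retrying.
 */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sv[2];
	char chunk[4096];
	char drain[65536];
	ssize_t n;

	memset(chunk, 'x', sizeof(chunk));
	socketpair(AF_UNIX, SOCK_STREAM, 0, sv);
	fcntl(sv[0], F_SETFL, O_NONBLOCK);

	/* Fill the send buffer until the kernel reports no space. */
	while ((n = send(sv[0], chunk, sizeof(chunk), 0)) > 0)
		;
	if (n < 0 && errno == EAGAIN)
		printf("send buffer full, waiting for write space\n");

	/* Peer consumes some data; that is what makes POLLOUT fire. */
	read(sv[1], drain, sizeof(drain));

	struct pollfd pfd = { .fd = sv[0], .events = POLLOUT };
	poll(&pfd, 1, -1);		/* analogue of sleeping on xprt->pending */
	if (pfd.revents & POLLOUT)
		printf("writable again, transmission can continue\n");

	close(sv[0]);
	close(sv[1]);
	return 0;
}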
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 05853159536a..230f9ca2ad6b 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1756,8 +1756,8 @@ static int getsockopt(struct socket *sock,
1756 else if (len < sizeof(value)) { 1756 else if (len < sizeof(value)) {
1757 res = -EINVAL; 1757 res = -EINVAL;
1758 } 1758 }
1759 else if ((res = copy_to_user(ov, &value, sizeof(value)))) { 1759 else if (copy_to_user(ov, &value, sizeof(value))) {
1760 /* couldn't return value */ 1760 res = -EFAULT;
1761 } 1761 }
1762 else { 1762 else {
1763 res = put_user(sizeof(value), ol); 1763 res = put_user(sizeof(value), ol);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 1454afcc06c4..e18cd3628db4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -2197,7 +2197,11 @@ static void __exit af_unix_exit(void)
2197 unregister_pernet_subsys(&unix_net_ops); 2197 unregister_pernet_subsys(&unix_net_ops);
2198} 2198}
2199 2199
2200module_init(af_unix_init); 2200/* Earlier than device_initcall() so that other drivers invoking
2201 request_module() don't end up in a loop when modprobe tries
2202 to use a UNIX socket. But later than subsys_initcall() because
2203 we depend on stuff initialised there */
2204fs_initcall(af_unix_init);
2201module_exit(af_unix_exit); 2205module_exit(af_unix_exit);
2202 2206
2203MODULE_LICENSE("GPL"); 2207MODULE_LICENSE("GPL");
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index ab4d0e598a2c..e0c0390613c0 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1819,7 +1819,7 @@ xfrm_state_ok(struct xfrm_tmpl *tmpl, struct xfrm_state *x,
1819 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) && 1819 (x->id.spi == tmpl->id.spi || !tmpl->id.spi) &&
1820 (x->props.reqid == tmpl->reqid || !tmpl->reqid) && 1820 (x->props.reqid == tmpl->reqid || !tmpl->reqid) &&
1821 x->props.mode == tmpl->mode && 1821 x->props.mode == tmpl->mode &&
1822 ((tmpl->aalgos & (1<<x->props.aalgo)) || 1822 (tmpl->allalgs || (tmpl->aalgos & (1<<x->props.aalgo)) ||
1823 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) && 1823 !(xfrm_id_proto_match(tmpl->id.proto, IPSEC_PROTO_ANY))) &&
1824 !(x->props.mode != XFRM_MODE_TRANSPORT && 1824 !(x->props.mode != XFRM_MODE_TRANSPORT &&
1825 xfrm_state_addr_cmp(tmpl, x, family)); 1825 xfrm_state_addr_cmp(tmpl, x, family));
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 1810f5645bb5..22a30ae582a2 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -981,6 +981,8 @@ static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
981 t->aalgos = ut->aalgos; 981 t->aalgos = ut->aalgos;
982 t->ealgos = ut->ealgos; 982 t->ealgos = ut->ealgos;
983 t->calgos = ut->calgos; 983 t->calgos = ut->calgos;
984 /* If all masks are ~0, then we allow all algorithms. */
985 t->allalgs = !~(t->aalgos & t->ealgos & t->calgos);
984 t->encap_family = ut->family; 986 t->encap_family = ut->family;
985 } 987 }
986} 988}
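The new allalgs flag in copy_templates() uses a compact bit trick: !~mask is non-zero only when every bit of mask is set, so ANDing the three algorithm masks and applying !~ yields "allow all algorithms" exactly when userspace passed ~0 for aalgos, ealgos and calgos. A tiny, runnable demonstration (local variables only, nothing from the xfrm API):

/*
 * !~x is 1 only when x has every bit set; any restricted mask gives 0.
 */
#include <stdio.h>

int main(void)
{
	unsigned int aalgos = ~0u, ealgos = ~0u, calgos = ~0u;

	printf("all masks ~0 -> allalgs = %u\n",
	       !~(aalgos & ealgos & calgos));		/* prints 1 */

	aalgos = 0x5;	/* any cleared bit makes the combined mask != ~0 */
	printf("restricted aalgos -> allalgs = %u\n",
	       !~(aalgos & ealgos & calgos));		/* prints 0 */
	return 0;
}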