author    Linus Torvalds <torvalds@linux-foundation.org>  2013-02-28 21:02:55 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-02-28 21:02:55 -0500
commit    b6669737d3db7df79fad07180837c23dbe581db5 (patch)
tree      671a9d13fe5ab00d6a3c7c5c5c466802ca96d38a /net
parent    1cf0209c431fa7790253c532039d53b0773193aa (diff)
parent    dc107402ae06286a9ed33c32daf3f35514a7cb8d (diff)
Merge branch 'for-3.9' of git://linux-nfs.org/~bfields/linux
Pull nfsd changes from J Bruce Fields:
 "Miscellaneous bugfixes, plus:

   - An overhaul of the DRC cache by Jeff Layton.  The main effect is
     just to make it larger.  This decreases the chances of intermittent
     errors especially in the UDP case.  But we'll need to watch for any
     reports of performance regressions.

   - Containerized nfsd: with some limitations, we now support
     per-container nfs-service, thanks to extensive work from Stanislav
     Kinsbursky over the last year."

Some notes about conflicts, since there were *two* non-data semantic
conflicts here:

 - idr_remove_all() had been added by a memory leak fix, but has since
   become deprecated since idr_destroy() does it for us now.

 - xs_local_connect() had been added by this branch to make AF_LOCAL
   connections be synchronous, but in the meantime Trond had changed the
   calling convention in order to avoid an RCU dereference.

There were a couple of more obvious actual source-level conflicts due to
the hlist traversal changes and one just due to code changes next to
each other, but those were trivial.

* 'for-3.9' of git://linux-nfs.org/~bfields/linux: (49 commits)
  SUNRPC: make AF_LOCAL connect synchronous
  nfsd: fix compiler warning about ambiguous types in nfsd_cache_csum
  svcrpc: fix rpc server shutdown races
  svcrpc: make svc_age_temp_xprts enqueue under sv_lock
  lockd: nlmclnt_reclaim(): avoid stack overflow
  nfsd: enable NFSv4 state in containers
  nfsd: disable usermode helper client tracker in container
  nfsd: use proper net while reading "exports" file
  nfsd: containerize NFSd filesystem
  nfsd: fix comments on nfsd_cache_lookup
  SUNRPC: move cache_detail->cache_request callback call to cache_read()
  SUNRPC: remove "cache_request" argument in sunrpc_cache_pipe_upcall() function
  SUNRPC: rework cache upcall logic
  SUNRPC: introduce cache_detail->cache_request callback
  NFS: simplify and clean cache library
  NFS: use SUNRPC cache creation and destruction helper for DNS cache
  nfsd4: free_stid can be static
  nfsd: keep a checksum of the first 256 bytes of request
  sunrpc: trim off trailing checksum before returning decrypted or integrity authenticated buffer
  sunrpc: fix comment in struct xdr_buf definition
  ...
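As background for the first conflict note above, a minimal, hypothetical sketch of the idr teardown pattern before and after the 3.9 idr rework (the function and variable names here are illustrative, not taken from this merge):

#include <linux/idr.h>

/* Hypothetical teardown helper illustrating the API change behind the
 * first semantic conflict: before 3.9, an IDR still holding ids had to
 * be emptied with idr_remove_all() before idr_destroy(); idr_destroy()
 * now releases everything itself, so the extra call became deprecated
 * and the memory-leak fix that had added it could be dropped. */
static void example_idr_teardown(struct idr *idr)
{
	/* old pattern (pre-3.9):
	 *	idr_remove_all(idr);
	 *	idr_destroy(idr);
	 */
	idr_destroy(idr);	/* 3.9+: one call is enough */
}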
Diffstat (limited to 'net')
-rw-r--r--   net/sunrpc/addr.c                      3
-rw-r--r--   net/sunrpc/auth_gss/gss_krb5_wrap.c    2
-rw-r--r--   net/sunrpc/auth_gss/svcauth_gss.c     18
-rw-r--r--   net/sunrpc/cache.c                    51
-rw-r--r--   net/sunrpc/clnt.c                      1
-rw-r--r--   net/sunrpc/rpcb_clnt.c                 1
-rw-r--r--   net/sunrpc/svc.c                      13
-rw-r--r--   net/sunrpc/svc_xprt.c                 81
-rw-r--r--   net/sunrpc/svcauth_unix.c             16
-rw-r--r--   net/sunrpc/xdr.c                      41
-rw-r--r--   net/sunrpc/xprtrdma/transport.c        1
-rw-r--r--   net/sunrpc/xprtsock.c                 35
12 files changed, 162 insertions, 101 deletions
diff --git a/net/sunrpc/addr.c b/net/sunrpc/addr.c
index d11418f97f1f..a622ad64acd8 100644
--- a/net/sunrpc/addr.c
+++ b/net/sunrpc/addr.c
@@ -17,7 +17,8 @@
  */
 
 #include <net/ipv6.h>
-#include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
+#include <linux/sunrpc/msg_prot.h>
 #include <linux/slab.h>
 #include <linux/export.h>
 
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 107c4528654f..88edec929d73 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -574,6 +574,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
 	buf->head[0].iov_len -= GSS_KRB5_TOK_HDR_LEN + headskip;
 	buf->len -= GSS_KRB5_TOK_HDR_LEN + headskip;
 
+	/* Trim off the checksum blob */
+	xdr_buf_trim(buf, GSS_KRB5_TOK_HDR_LEN + tailskip);
 	return GSS_S_COMPLETE;
 }
 
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index ecd1d58bf611..f7d34e7b6f81 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -182,12 +182,6 @@ static void rsi_request(struct cache_detail *cd,
 	(*bpp)[-1] = '\n';
 }
 
-static int rsi_upcall(struct cache_detail *cd, struct cache_head *h)
-{
-	return sunrpc_cache_pipe_upcall(cd, h, rsi_request);
-}
-
-
 static int rsi_parse(struct cache_detail *cd,
 		char *mesg, int mlen)
 {
@@ -275,7 +269,7 @@ static struct cache_detail rsi_cache_template = {
 	.hash_size	= RSI_HASHMAX,
 	.name           = "auth.rpcsec.init",
 	.cache_put      = rsi_put,
-	.cache_upcall   = rsi_upcall,
+	.cache_request  = rsi_request,
 	.cache_parse    = rsi_parse,
 	.match		= rsi_match,
 	.init		= rsi_init,
@@ -825,13 +819,17 @@ read_u32_from_xdr_buf(struct xdr_buf *buf, int base, u32 *obj)
  * The server uses base of head iovec as read pointer, while the
  * client uses separate pointer. */
 static int
-unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
+unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
 {
 	int stat = -EINVAL;
 	u32 integ_len, maj_stat;
 	struct xdr_netobj mic;
 	struct xdr_buf integ_buf;
 
+	/* Did we already verify the signature on the original pass through? */
+	if (rqstp->rq_deferred)
+		return 0;
+
 	integ_len = svc_getnl(&buf->head[0]);
 	if (integ_len & 3)
 		return stat;
@@ -854,6 +852,8 @@ unwrap_integ_data(struct xdr_buf *buf, u32 seq, struct gss_ctx *ctx)
 		goto out;
 	if (svc_getnl(&buf->head[0]) != seq)
 		goto out;
+	/* trim off the mic at the end before returning */
+	xdr_buf_trim(buf, mic.len + 4);
 	stat = 0;
 out:
 	kfree(mic.data);
@@ -1198,7 +1198,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
 		/* placeholders for length and seq. number: */
 		svc_putnl(resv, 0);
 		svc_putnl(resv, 0);
-		if (unwrap_integ_data(&rqstp->rq_arg,
+		if (unwrap_integ_data(rqstp, &rqstp->rq_arg,
 					gc->gc_seq, rsci->mechctx))
 			goto garbage_args;
 		break;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 39a4112faf54..25d58e766014 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -196,9 +196,9 @@ EXPORT_SYMBOL_GPL(sunrpc_cache_update);
 
 static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h)
 {
-	if (!cd->cache_upcall)
-		return -EINVAL;
-	return cd->cache_upcall(cd, h);
+	if (cd->cache_upcall)
+		return cd->cache_upcall(cd, h);
+	return sunrpc_cache_pipe_upcall(cd, h);
 }
 
 static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h)
@@ -750,6 +750,18 @@ struct cache_reader {
 	int			offset;	/* if non-0, we have a refcnt on next request */
 };
 
+static int cache_request(struct cache_detail *detail,
+			       struct cache_request *crq)
+{
+	char *bp = crq->buf;
+	int len = PAGE_SIZE;
+
+	detail->cache_request(detail, crq->item, &bp, &len);
+	if (len < 0)
+		return -EAGAIN;
+	return PAGE_SIZE - len;
+}
+
 static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
 			  loff_t *ppos, struct cache_detail *cd)
 {
@@ -784,6 +796,13 @@ static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
 		rq->readers++;
 	spin_unlock(&queue_lock);
 
+	if (rq->len == 0) {
+		err = cache_request(cd, rq);
+		if (err < 0)
+			goto out;
+		rq->len = err;
+	}
+
 	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
 		err = -EAGAIN;
 		spin_lock(&queue_lock);
@@ -1140,17 +1159,14 @@ static bool cache_listeners_exist(struct cache_detail *detail)
  *
  * Each request is at most one page long.
  */
-int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
-		void (*cache_request)(struct cache_detail *,
-				      struct cache_head *,
-				      char **,
-				      int *))
+int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
 {
 
 	char *buf;
 	struct cache_request *crq;
-	char *bp;
-	int len;
+
+	if (!detail->cache_request)
+		return -EINVAL;
 
 	if (!cache_listeners_exist(detail)) {
 		warn_no_listener(detail);
@@ -1167,19 +1183,10 @@ int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
 		return -EAGAIN;
 	}
 
-	bp = buf; len = PAGE_SIZE;
-
-	cache_request(detail, h, &bp, &len);
-
-	if (len < 0) {
-		kfree(buf);
-		kfree(crq);
-		return -EAGAIN;
-	}
 	crq->q.reader = 0;
 	crq->item = cache_get(h);
 	crq->buf = buf;
-	crq->len = PAGE_SIZE - len;
+	crq->len = 0;
 	crq->readers = 0;
 	spin_lock(&queue_lock);
 	list_add_tail(&crq->q.list, &detail->queue);
@@ -1605,7 +1612,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
 	if (p == NULL)
 		goto out_nomem;
 
-	if (cd->cache_upcall || cd->cache_parse) {
+	if (cd->cache_request || cd->cache_parse) {
 		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
 				     cd->u.procfs.proc_ent,
 				     &cache_file_operations_procfs, cd);
@@ -1614,7 +1621,7 @@ static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
 			goto out_nomem;
 	}
 	if (cd->cache_show) {
-		p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
+		p = proc_create_data("content", S_IFREG|S_IRUSR,
 				     cd->u.procfs.proc_ent,
 				     &content_file_operations_procfs, cd);
 		cd->u.procfs.content_ent = p;
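The cache.c changes above, together with the svcauth_gss.c and svcauth_unix.c hunks, replace per-cache cache_upcall wrappers with a cache_request callback that is only invoked from cache_read() when userspace actually consumes the request. A minimal, hypothetical cache_detail using the reworked interface might look like this (all demo_* names are placeholders, not code from this series):

#include <linux/module.h>
#include <linux/sunrpc/cache.h>

struct demo_ent {
	struct cache_head	h;
	char			key[32];
};

/* Formats one queued request into the page-sized buffer; with this
 * series it runs lazily at read time rather than at upcall time. */
static void demo_request(struct cache_detail *cd, struct cache_head *h,
			 char **bpp, int *blen)
{
	struct demo_ent *ent = container_of(h, struct demo_ent, h);

	qword_add(bpp, blen, ent->key);
	(*bpp)[-1] = '\n';
}

static int demo_parse(struct cache_detail *cd, char *mesg, int mlen)
{
	return -EINVAL;		/* handling of the userspace reply elided */
}

static struct cache_detail demo_cache_template = {
	.owner		= THIS_MODULE,
	.hash_size	= 64,
	.name		= "demo.cache",
	.cache_request	= demo_request,	/* replaces the old .cache_upcall wrapper */
	.cache_parse	= demo_parse,
};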
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index a9f7906c1a6a..d7a369e61085 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -33,6 +33,7 @@
 #include <linux/rcupdate.h>
 
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/rpc_pipe_fs.h>
 #include <linux/sunrpc/metrics.h>
 #include <linux/sunrpc/bc_xprt.h>
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 795a0f4e920b..3df764dc330c 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -26,6 +26,7 @@
 #include <net/ipv6.h>
 
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/xprtsock.h>
 
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index dbf12ac5ecb7..89a588b4478b 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -515,15 +515,6 @@ EXPORT_SYMBOL_GPL(svc_create_pooled);
 
 void svc_shutdown_net(struct svc_serv *serv, struct net *net)
 {
-	/*
-	 * The set of xprts (contained in the sv_tempsocks and
-	 * sv_permsocks lists) is now constant, since it is modified
-	 * only by accepting new sockets (done by service threads in
-	 * svc_recv) or aging old ones (done by sv_temptimer), or
-	 * configuration changes (excluded by whatever locking the
-	 * caller is using--nfsd_mutex in the case of nfsd).  So it's
-	 * safe to traverse those lists and shut everything down:
-	 */
 	svc_close_net(serv, net);
 
 	if (serv->sv_shutdown)
@@ -1042,6 +1033,7 @@ static void svc_unregister(const struct svc_serv *serv, struct net *net)
 /*
  * dprintk the given error with the address of the client that caused it.
  */
+#ifdef RPC_DEBUG
 static __printf(2, 3)
 void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
 {
@@ -1058,6 +1050,9 @@ void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...)
 
 	va_end(args);
 }
+#else
+static __printf(2,3) void svc_printk(struct svc_rqst *rqstp, const char *fmt, ...) {}
+#endif
 
 /*
  * Common routine for processing the RPC request.
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b8e47fac7315..80a6640f329b 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -499,7 +499,8 @@ void svc_wake_up(struct svc_serv *serv)
 			rqstp->rq_xprt = NULL;
 			 */
 			wake_up(&rqstp->rq_wait);
-		}
+		} else
+			pool->sp_task_pending = 1;
 		spin_unlock_bh(&pool->sp_lock);
 	}
 }
@@ -634,7 +635,13 @@ struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 		 * long for cache updates.
 		 */
 		rqstp->rq_chandle.thread_wait = 1*HZ;
+		pool->sp_task_pending = 0;
 	} else {
+		if (pool->sp_task_pending) {
+			pool->sp_task_pending = 0;
+			spin_unlock_bh(&pool->sp_lock);
+			return ERR_PTR(-EAGAIN);
+		}
 		/* No data pending. Go to sleep */
 		svc_thread_enqueue(pool, rqstp);
 
@@ -856,7 +863,6 @@ static void svc_age_temp_xprts(unsigned long closure)
 	struct svc_serv *serv = (struct svc_serv *)closure;
 	struct svc_xprt *xprt;
 	struct list_head *le, *next;
-	LIST_HEAD(to_be_aged);
 
 	dprintk("svc_age_temp_xprts\n");
 
@@ -877,25 +883,15 @@ static void svc_age_temp_xprts(unsigned long closure)
 		if (atomic_read(&xprt->xpt_ref.refcount) > 1 ||
 		    test_bit(XPT_BUSY, &xprt->xpt_flags))
 			continue;
-		svc_xprt_get(xprt);
-		list_move(le, &to_be_aged);
+		list_del_init(le);
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
 		set_bit(XPT_DETACHED, &xprt->xpt_flags);
-	}
-	spin_unlock_bh(&serv->sv_lock);
-
-	while (!list_empty(&to_be_aged)) {
-		le = to_be_aged.next;
-		/* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */
-		list_del_init(le);
-		xprt = list_entry(le, struct svc_xprt, xpt_list);
-
 		dprintk("queuing xprt %p for closing\n", xprt);
 
 		/* a thread will dequeue and close it soon */
 		svc_xprt_enqueue(xprt);
-		svc_xprt_put(xprt);
 	}
+	spin_unlock_bh(&serv->sv_lock);
 
 	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
 }
@@ -959,21 +955,24 @@ void svc_close_xprt(struct svc_xprt *xprt)
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
 
-static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
 {
 	struct svc_xprt *xprt;
+	int ret = 0;
 
 	spin_lock(&serv->sv_lock);
 	list_for_each_entry(xprt, xprt_list, xpt_list) {
 		if (xprt->xpt_net != net)
 			continue;
+		ret++;
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
-		set_bit(XPT_BUSY, &xprt->xpt_flags);
+		svc_xprt_enqueue(xprt);
 	}
 	spin_unlock(&serv->sv_lock);
+	return ret;
 }
 
-static void svc_clear_pools(struct svc_serv *serv, struct net *net)
+static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
 {
 	struct svc_pool *pool;
 	struct svc_xprt *xprt;
@@ -988,42 +987,46 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net)
 			if (xprt->xpt_net != net)
 				continue;
 			list_del_init(&xprt->xpt_ready);
+			spin_unlock_bh(&pool->sp_lock);
+			return xprt;
 		}
 		spin_unlock_bh(&pool->sp_lock);
 	}
+	return NULL;
 }
 
-static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
 {
 	struct svc_xprt *xprt;
-	struct svc_xprt *tmp;
-	LIST_HEAD(victims);
 
-	spin_lock(&serv->sv_lock);
-	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
-		if (xprt->xpt_net != net)
-			continue;
-		list_move(&xprt->xpt_list, &victims);
-	}
-	spin_unlock(&serv->sv_lock);
-
-	list_for_each_entry_safe(xprt, tmp, &victims, xpt_list)
+	while ((xprt = svc_dequeue_net(serv, net))) {
+		set_bit(XPT_CLOSE, &xprt->xpt_flags);
 		svc_delete_xprt(xprt);
+	}
 }
 
+/*
+ * Server threads may still be running (especially in the case where the
+ * service is still running in other network namespaces).
+ *
+ * So we shut down sockets the same way we would on a running server, by
+ * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
+ * the close.  In the case there are no such other threads,
+ * threads running, svc_clean_up_xprts() does a simple version of a
+ * server's main event loop, and in the case where there are other
+ * threads, we may need to wait a little while and then check again to
+ * see if they're done.
+ */
 void svc_close_net(struct svc_serv *serv, struct net *net)
 {
-	svc_close_list(serv, &serv->sv_tempsocks, net);
-	svc_close_list(serv, &serv->sv_permsocks, net);
+	int delay = 0;
 
-	svc_clear_pools(serv, net);
-	/*
-	 * At this point the sp_sockets lists will stay empty, since
-	 * svc_xprt_enqueue will not add new entries without taking the
-	 * sp_lock and checking XPT_BUSY.
-	 */
-	svc_clear_list(serv, &serv->sv_tempsocks, net);
-	svc_clear_list(serv, &serv->sv_permsocks, net);
+	while (svc_close_list(serv, &serv->sv_permsocks, net) +
+	       svc_close_list(serv, &serv->sv_tempsocks, net)) {
+
+		svc_clean_up_xprts(serv, net);
+		msleep(delay++);
+	}
 }
 
 /*
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index a1852e19ed0c..c3f9e1ef7f53 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -6,6 +6,7 @@
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/svcauth.h>
 #include <linux/sunrpc/gss_api.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/err.h>
 #include <linux/seq_file.h>
 #include <linux/hash.h>
@@ -17,7 +18,6 @@
 #include <linux/user_namespace.h>
 #define RPCDBG_FACILITY	RPCDBG_AUTH
 
-#include <linux/sunrpc/clnt.h>
 
 #include "netns.h"
 
@@ -157,11 +157,6 @@ static void ip_map_request(struct cache_detail *cd,
 	(*bpp)[-1] = '\n';
 }
 
-static int ip_map_upcall(struct cache_detail *cd, struct cache_head *h)
-{
-	return sunrpc_cache_pipe_upcall(cd, h, ip_map_request);
-}
-
 static struct ip_map *__ip_map_lookup(struct cache_detail *cd, char *class, struct in6_addr *addr);
 static int __ip_map_update(struct cache_detail *cd, struct ip_map *ipm, struct unix_domain *udom, time_t expiry);
 
@@ -475,11 +470,6 @@ static void unix_gid_request(struct cache_detail *cd,
 	(*bpp)[-1] = '\n';
 }
 
-static int unix_gid_upcall(struct cache_detail *cd, struct cache_head *h)
-{
-	return sunrpc_cache_pipe_upcall(cd, h, unix_gid_request);
-}
-
 static struct unix_gid *unix_gid_lookup(struct cache_detail *cd, kuid_t uid);
 
 static int unix_gid_parse(struct cache_detail *cd,
@@ -586,7 +576,7 @@ static struct cache_detail unix_gid_cache_template = {
 	.hash_size	= GID_HASHMAX,
 	.name		= "auth.unix.gid",
 	.cache_put	= unix_gid_put,
-	.cache_upcall	= unix_gid_upcall,
+	.cache_request	= unix_gid_request,
 	.cache_parse	= unix_gid_parse,
 	.cache_show	= unix_gid_show,
 	.match		= unix_gid_match,
@@ -885,7 +875,7 @@ static struct cache_detail ip_map_cache_template = {
 	.hash_size	= IP_HASHMAX,
 	.name		= "auth.unix.ip",
 	.cache_put	= ip_map_put,
-	.cache_upcall	= ip_map_upcall,
+	.cache_request	= ip_map_request,
 	.cache_parse	= ip_map_parse,
 	.cache_show	= ip_map_show,
 	.match		= ip_map_match,
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 56055632f151..75edcfad6e26 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -879,6 +879,47 @@ xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
 }
 EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
 
+/**
+ * xdr_buf_trim - lop at most "len" bytes off the end of "buf"
+ * @buf: buf to be trimmed
+ * @len: number of bytes to reduce "buf" by
+ *
+ * Trim an xdr_buf by the given number of bytes by fixing up the lengths. Note
+ * that it's possible that we'll trim less than that amount if the xdr_buf is
+ * too small, or if (for instance) it's all in the head and the parser has
+ * already read too far into it.
+ */
+void xdr_buf_trim(struct xdr_buf *buf, unsigned int len)
+{
+	size_t cur;
+	unsigned int trim = len;
+
+	if (buf->tail[0].iov_len) {
+		cur = min_t(size_t, buf->tail[0].iov_len, trim);
+		buf->tail[0].iov_len -= cur;
+		trim -= cur;
+		if (!trim)
+			goto fix_len;
+	}
+
+	if (buf->page_len) {
+		cur = min_t(unsigned int, buf->page_len, trim);
+		buf->page_len -= cur;
+		trim -= cur;
+		if (!trim)
+			goto fix_len;
+	}
+
+	if (buf->head[0].iov_len) {
+		cur = min_t(size_t, buf->head[0].iov_len, trim);
+		buf->head[0].iov_len -= cur;
+		trim -= cur;
+	}
+fix_len:
+	buf->len -= (len - trim);
+}
+EXPORT_SYMBOL_GPL(xdr_buf_trim);
+
 static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
 {
 	unsigned int this_len;
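The new xdr_buf_trim() above is what the auth_gss hunks earlier in this diff rely on to strip the trailing MIC (mic.len + 4) and the Kerberos v2 checksum blob before handing the buffer back. A minimal, hypothetical caller, for illustration only:

#include <linux/sunrpc/xdr.h>

/* Hypothetical helper: after a trailing verifier of "cksum_len" bytes
 * has been checked, drop it so upper layers only see payload bytes.
 * xdr_buf_trim() shortens the tail, then the page data, then the head,
 * and finally adjusts buf->len by however much was actually trimmed. */
static void demo_strip_trailing_checksum(struct xdr_buf *buf,
					 unsigned int cksum_len)
{
	if (buf->len < cksum_len)
		return;			/* nothing sensible to trim */
	xdr_buf_trim(buf, cksum_len);
}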
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index d0074289708e..794312f22b9b 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -51,6 +51,7 @@
 #include <linux/init.h>
 #include <linux/slab.h>
 #include <linux/seq_file.h>
+#include <linux/sunrpc/addr.h>
 
 #include "xprt_rdma.h"
 
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 37cbda63f45c..c1d8476b7692 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -33,6 +33,7 @@
 #include <linux/udp.h>
 #include <linux/tcp.h>
 #include <linux/sunrpc/clnt.h>
+#include <linux/sunrpc/addr.h>
 #include <linux/sunrpc/sched.h>
 #include <linux/sunrpc/svcsock.h>
 #include <linux/sunrpc/xprtsock.h>
@@ -1867,13 +1868,9 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt,
  * @xprt: RPC transport to connect
  * @transport: socket transport to connect
  * @create_sock: function to create a socket of the correct type
- *
- * Invoked by a work queue tasklet.
  */
-static void xs_local_setup_socket(struct work_struct *work)
+static int xs_local_setup_socket(struct sock_xprt *transport)
 {
-	struct sock_xprt *transport =
-		container_of(work, struct sock_xprt, connect_worker.work);
 	struct rpc_xprt *xprt = &transport->xprt;
 	struct socket *sock;
 	int status = -EIO;
@@ -1918,6 +1915,30 @@ out:
 	xprt_clear_connecting(xprt);
 	xprt_wake_pending_tasks(xprt, status);
 	current->flags &= ~PF_FSTRANS;
+	return status;
+}
+
+static void xs_local_connect(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+	struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+	int ret;
+
+	if (RPC_IS_ASYNC(task)) {
+		/*
+		 * We want the AF_LOCAL connect to be resolved in the
+		 * filesystem namespace of the process making the rpc
+		 * call.  Thus we connect synchronously.
+		 *
+		 * If we want to support asynchronous AF_LOCAL calls,
+		 * we'll need to figure out how to pass a namespace to
+		 * connect.
+		 */
+		rpc_exit(task, -ENOTCONN);
+		return;
+	}
+	ret = xs_local_setup_socket(transport);
+	if (ret && !RPC_IS_SOFTCONN(task))
+		msleep_interruptible(15000);
 }
 
 #ifdef CONFIG_SUNRPC_SWAP
@@ -2455,7 +2476,7 @@ static struct rpc_xprt_ops xs_local_ops = {
 	.alloc_slot		= xprt_alloc_slot,
 	.rpcbind		= xs_local_rpcbind,
 	.set_port		= xs_local_set_port,
-	.connect		= xs_connect,
+	.connect		= xs_local_connect,
 	.buf_alloc		= rpc_malloc,
 	.buf_free		= rpc_free,
 	.send_request		= xs_local_send_request,
@@ -2628,8 +2649,6 @@ static struct rpc_xprt *xs_setup_local(struct xprt_create *args)
 		goto out_err;
 	}
 	xprt_set_bound(xprt);
-	INIT_DELAYED_WORK(&transport->connect_worker,
-			xs_local_setup_socket);
 	xs_format_peer_addresses(xprt, "local", RPCBIND_NETID_LOCAL);
 	break;
 	default:
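To illustrate why the AF_LOCAL connect above has to run synchronously: such a transport is bound to a filesystem path, so the connect must happen in the mount namespace of the task issuing the RPC, much as the kernel's local rpcbind client does. A rough, hypothetical client setup (the socket path, program and version are placeholders, not values from this merge):

#include <linux/un.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xprtsock.h>

static struct rpc_clnt *demo_local_rpc_client(struct net *net,
					       const struct rpc_program *prog)
{
	struct sockaddr_un sun = {
		.sun_family	= AF_LOCAL,
		.sun_path	= "/var/run/demo.sock",	/* placeholder path */
	};
	struct rpc_create_args args = {
		.net		= net,
		.protocol	= XPRT_TRANSPORT_LOCAL,
		.address	= (struct sockaddr *)&sun,
		.addrsize	= sizeof(sun),
		.servername	= "localhost",
		.program	= prog,
		.version	= 1,			/* placeholder version */
		.authflavor	= RPC_AUTH_NULL,
	};

	/*
	 * With .connect = xs_local_connect, the AF_LOCAL socket is
	 * connected in the context of the task that issues the first
	 * call (for instance the ping done by rpc_create()), and thus
	 * in that task's filesystem namespace; an async task would
	 * instead see -ENOTCONN, as in the hunk above.
	 */
	return rpc_create(&args);
}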