author     Tom Tucker <tom@opengridcomputing.com>      2007-12-30 22:08:08 -0500
committer  J. Bruce Fields <bfields@citi.umich.edu>    2008-02-01 16:42:12 -0500
commit     def13d7401e9b95bbd34c20057ebeb2972708b1b (patch)
tree       afea72afdfe80c645eaf75aa828a49a6e1dec864 /net
parent     4bc6c497b26a7984cac842a09e2e8f8c46242782 (diff)
svc: Move the authinfo cache to svc_xprt.
Move the authinfo cache to svc_xprt. This allows both the TCP and RDMA
transports to share this logic. A flag bit is used to determine if auth
information is to be cached or not. Previously, this code looked at the
transport protocol. I've also changed the spin_lock/unlock logic so that a
lock is not taken for transports that are not caching auth info.

Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Acked-by: Neil Brown <neilb@suse.de>
Reviewed-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Greg Banks <gnb@sgi.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
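In outline, the new scheme works like the sketch below (illustrative only, distilled from the hunks that follow; the example_* function names are invented, while the flag, field, and helper names come from the patch): a transport that wants to cache auth info sets XPT_CACHE_AUTH when its svc_xprt is initialized, and the ip_map cache helpers touch xpt_lock only when that bit is set, so non-caching transports such as UDP never take the lock.

/* Illustrative sketch only -- not part of this patch. */

/* A caching transport (TCP, RDMA) opts in when it sets up its svc_xprt: */
static void example_caching_transport_init(struct svc_xprt *xprt)
{
	set_bit(XPT_CACHE_AUTH, &xprt->xpt_flags);
}

/* UDP leaves the bit clear, so the cache helpers skip the lock entirely. */
static struct ip_map *example_cached_get(struct svc_rqst *rqstp)
{
	struct svc_xprt *xprt = rqstp->rq_xprt;
	struct ip_map *ipm = NULL;

	if (!test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags))
		return NULL;			/* no lock taken at all */

	spin_lock(&xprt->xpt_lock);
	ipm = xprt->xpt_auth_cache;
	if (ipm != NULL)
		cache_get(&ipm->h);		/* validity check elided */
	spin_unlock(&xprt->xpt_lock);
	return ipm;
}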
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/svc_xprt.c       4
-rw-r--r--  net/sunrpc/svcauth_unix.c  54
-rw-r--r--  net/sunrpc/svcsock.c       22
3 files changed, 45 insertions, 35 deletions
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3e6a1c81d4ce..d2ac130b9040 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -75,6 +75,9 @@ static void svc_xprt_free(struct kref *kref)
 	struct svc_xprt *xprt =
 		container_of(kref, struct svc_xprt, xpt_ref);
 	struct module *owner = xprt->xpt_class->xcl_owner;
+	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)
+	    && xprt->xpt_auth_cache != NULL)
+		svcauth_unix_info_release(xprt->xpt_auth_cache);
 	xprt->xpt_ops->xpo_free(xprt);
 	module_put(owner);
 }
@@ -100,6 +103,7 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
 	INIT_LIST_HEAD(&xprt->xpt_list);
 	INIT_LIST_HEAD(&xprt->xpt_ready);
 	mutex_init(&xprt->xpt_mutex);
+	spin_lock_init(&xprt->xpt_lock);
 }
 EXPORT_SYMBOL_GPL(svc_xprt_init);
 
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 411479411b21..6815157bd65c 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -384,41 +384,45 @@ void svcauth_unix_purge(void)
 static inline struct ip_map *
 ip_map_cached_get(struct svc_rqst *rqstp)
 {
-	struct ip_map *ipm;
-	struct svc_sock *svsk = rqstp->rq_sock;
-	spin_lock(&svsk->sk_lock);
-	ipm = svsk->sk_info_authunix;
-	if (ipm != NULL) {
-		if (!cache_valid(&ipm->h)) {
-			/*
-			 * The entry has been invalidated since it was
-			 * remembered, e.g. by a second mount from the
-			 * same IP address.
-			 */
-			svsk->sk_info_authunix = NULL;
-			spin_unlock(&svsk->sk_lock);
-			cache_put(&ipm->h, &ip_map_cache);
-			return NULL;
+	struct ip_map *ipm = NULL;
+	struct svc_xprt *xprt = rqstp->rq_xprt;
+
+	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
+		spin_lock(&xprt->xpt_lock);
+		ipm = xprt->xpt_auth_cache;
+		if (ipm != NULL) {
+			if (!cache_valid(&ipm->h)) {
+				/*
+				 * The entry has been invalidated since it was
+				 * remembered, e.g. by a second mount from the
+				 * same IP address.
+				 */
+				xprt->xpt_auth_cache = NULL;
+				spin_unlock(&xprt->xpt_lock);
+				cache_put(&ipm->h, &ip_map_cache);
+				return NULL;
+			}
+			cache_get(&ipm->h);
 		}
-		cache_get(&ipm->h);
+		spin_unlock(&xprt->xpt_lock);
 	}
-	spin_unlock(&svsk->sk_lock);
 	return ipm;
 }
 
 static inline void
 ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
 {
-	struct svc_sock *svsk = rqstp->rq_sock;
+	struct svc_xprt *xprt = rqstp->rq_xprt;
 
-	spin_lock(&svsk->sk_lock);
-	if (svsk->sk_sock->type == SOCK_STREAM &&
-	    svsk->sk_info_authunix == NULL) {
-		/* newly cached, keep the reference */
-		svsk->sk_info_authunix = ipm;
-		ipm = NULL;
+	if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
+		spin_lock(&xprt->xpt_lock);
+		if (xprt->xpt_auth_cache == NULL) {
+			/* newly cached, keep the reference */
+			xprt->xpt_auth_cache = ipm;
+			ipm = NULL;
+		}
+		spin_unlock(&xprt->xpt_lock);
 	}
-	spin_unlock(&svsk->sk_lock);
 	if (ipm)
 		cache_put(&ipm->h, &ip_map_cache);
 }
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 2390286e1827..5c9422c9a980 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -113,12 +113,16 @@ static inline void svc_reclassify_socket(struct socket *sock)
 	switch (sk->sk_family) {
 	case AF_INET:
 		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
-			&svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
+					      &svc_slock_key[0],
+					      "sk_xprt.xpt_lock-AF_INET-NFSD",
+					      &svc_key[0]);
 		break;
 
 	case AF_INET6:
 		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
-			&svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
+					      &svc_slock_key[1],
+					      "sk_xprt.xpt_lock-AF_INET6-NFSD",
+					      &svc_key[1]);
 		break;
 
 	default:
@@ -930,6 +934,7 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
 	mm_segment_t oldfs;
 
 	svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv);
+	clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
 	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
 	svsk->sk_sk->sk_write_space = svc_write_space;
 
@@ -1385,7 +1390,7 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv);
-
+	set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
 	if (sk->sk_state == TCP_LISTEN) {
 		dprintk("setting up TCP socket for listening\n");
 		set_bit(XPT_LISTENER, &svsk->sk_xprt.xpt_flags);
@@ -1753,7 +1758,6 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	svsk->sk_ostate = inet->sk_state_change;
 	svsk->sk_odata = inet->sk_data_ready;
 	svsk->sk_owspace = inet->sk_write_space;
-	spin_lock_init(&svsk->sk_lock);
 	INIT_LIST_HEAD(&svsk->sk_deferred);
 
 	/* Initialize the socket */
@@ -1898,8 +1902,6 @@ static void svc_sock_free(struct svc_xprt *xprt)
 	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
 	dprintk("svc: svc_sock_free(%p)\n", svsk);
 
-	if (svsk->sk_info_authunix != NULL)
-		svcauth_unix_info_release(svsk->sk_info_authunix);
 	if (svsk->sk_sock->file)
 		sockfd_put(svsk->sk_sock);
 	else
@@ -1984,9 +1986,9 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 	dprintk("revisit queued\n");
 	svsk = dr->svsk;
 	dr->svsk = NULL;
-	spin_lock(&svsk->sk_lock);
+	spin_lock(&svsk->sk_xprt.xpt_lock);
 	list_add(&dr->handle.recent, &svsk->sk_deferred);
-	spin_unlock(&svsk->sk_lock);
+	spin_unlock(&svsk->sk_xprt.xpt_lock);
 	set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	svc_xprt_enqueue(&svsk->sk_xprt);
 	svc_xprt_put(&svsk->sk_xprt);
@@ -2052,7 +2054,7 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
 
 	if (!test_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags))
 		return NULL;
-	spin_lock(&svsk->sk_lock);
+	spin_lock(&svsk->sk_xprt.xpt_lock);
 	clear_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	if (!list_empty(&svsk->sk_deferred)) {
 		dr = list_entry(svsk->sk_deferred.next,
@@ -2061,6 +2063,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
 		list_del_init(&dr->handle.recent);
 		set_bit(XPT_DEFERRED, &svsk->sk_xprt.xpt_flags);
 	}
-	spin_unlock(&svsk->sk_lock);
+	spin_unlock(&svsk->sk_xprt.xpt_lock);
 	return dr;
 }