aboutsummaryrefslogtreecommitdiffstats
path: root/net/sunrpc/svcauth_unix.c
diff options
context:
space:
mode:
authorTom Tucker <tom@opengridcomputing.com>2007-12-30 22:08:08 -0500
committerJ. Bruce Fields <bfields@citi.umich.edu>2008-02-01 16:42:12 -0500
commitdef13d7401e9b95bbd34c20057ebeb2972708b1b (patch)
treeafea72afdfe80c645eaf75aa828a49a6e1dec864 /net/sunrpc/svcauth_unix.c
parent4bc6c497b26a7984cac842a09e2e8f8c46242782 (diff)
svc: Move the authinfo cache to svc_xprt.
Move the authinfo cache to svc_xprt. This allows both the TCP and RDMA transports to share this logic. A flag bit is used to determine if auth information is to be cached or not. Previously, this code looked at the transport protocol. I've also changed the spin_lock/unlock logic so that a lock is not taken for transports that are not caching auth info. Signed-off-by: Tom Tucker <tom@opengridcomputing.com> Acked-by: Neil Brown <neilb@suse.de> Reviewed-by: Chuck Lever <chuck.lever@oracle.com> Reviewed-by: Greg Banks <gnb@sgi.com> Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Diffstat (limited to 'net/sunrpc/svcauth_unix.c')
-rw-r--r--net/sunrpc/svcauth_unix.c54
1 file changed, 29 insertions, 25 deletions
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 411479411b21..6815157bd65c 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -384,41 +384,45 @@ void svcauth_unix_purge(void)
384static inline struct ip_map * 384static inline struct ip_map *
385ip_map_cached_get(struct svc_rqst *rqstp) 385ip_map_cached_get(struct svc_rqst *rqstp)
386{ 386{
387 struct ip_map *ipm; 387 struct ip_map *ipm = NULL;
388 struct svc_sock *svsk = rqstp->rq_sock; 388 struct svc_xprt *xprt = rqstp->rq_xprt;
389 spin_lock(&svsk->sk_lock); 389
390 ipm = svsk->sk_info_authunix; 390 if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
391 if (ipm != NULL) { 391 spin_lock(&xprt->xpt_lock);
392 if (!cache_valid(&ipm->h)) { 392 ipm = xprt->xpt_auth_cache;
393 /* 393 if (ipm != NULL) {
394 * The entry has been invalidated since it was 394 if (!cache_valid(&ipm->h)) {
395 * remembered, e.g. by a second mount from the 395 /*
396 * same IP address. 396 * The entry has been invalidated since it was
397 */ 397 * remembered, e.g. by a second mount from the
398 svsk->sk_info_authunix = NULL; 398 * same IP address.
399 spin_unlock(&svsk->sk_lock); 399 */
400 cache_put(&ipm->h, &ip_map_cache); 400 xprt->xpt_auth_cache = NULL;
401 return NULL; 401 spin_unlock(&xprt->xpt_lock);
402 cache_put(&ipm->h, &ip_map_cache);
403 return NULL;
404 }
405 cache_get(&ipm->h);
402 } 406 }
403 cache_get(&ipm->h); 407 spin_unlock(&xprt->xpt_lock);
404 } 408 }
405 spin_unlock(&svsk->sk_lock);
406 return ipm; 409 return ipm;
407} 410}
408 411
409static inline void 412static inline void
410ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm) 413ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
411{ 414{
412 struct svc_sock *svsk = rqstp->rq_sock; 415 struct svc_xprt *xprt = rqstp->rq_xprt;
413 416
414 spin_lock(&svsk->sk_lock); 417 if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) {
415 if (svsk->sk_sock->type == SOCK_STREAM && 418 spin_lock(&xprt->xpt_lock);
416 svsk->sk_info_authunix == NULL) { 419 if (xprt->xpt_auth_cache == NULL) {
417 /* newly cached, keep the reference */ 420 /* newly cached, keep the reference */
418 svsk->sk_info_authunix = ipm; 421 xprt->xpt_auth_cache = ipm;
419 ipm = NULL; 422 ipm = NULL;
423 }
424 spin_unlock(&xprt->xpt_lock);
420 } 425 }
421 spin_unlock(&svsk->sk_lock);
422 if (ipm) 426 if (ipm)
423 cache_put(&ipm->h, &ip_map_cache); 427 cache_put(&ipm->h, &ip_map_cache);
424} 428}