author    NeilBrown <neilb@suse.de>    2007-05-09 05:34:48 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-05-09 15:30:54 -0400
commit    7ac1bea5507218da03f6005d228789da5a831c3f (patch)
tree      43c0a68d468352f8ca1f42ada7970e3a97411f5b
parent    f34b95689d2ce001c157b1604289ff240b4bdee0 (diff)
knfsd: rename sk_defer_lock to sk_lock
Now that sk_defer_lock protects two different things, make the name
more generic.

Also don't bother with disabling _bh as the lock is only ever taken
from process context.

Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
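[Editor's note, not part of the patch: a minimal sketch of the locking rule the
message relies on. The names example_lock, example_list and example_process_path
are hypothetical; they only illustrate why a lock taken solely from process
context can use plain spin_lock(), while a lock also taken from softirq
(bottom-half) context would need the _bh variants.]

    #include <linux/spinlock.h>
    #include <linux/list.h>

    static DEFINE_SPINLOCK(example_lock);   /* hypothetical lock */
    static LIST_HEAD(example_list);         /* hypothetical list it protects */

    /* Process-context only: nothing in softirq context ever takes
     * example_lock, so the plain variants suffice and the cost of
     * disabling bottom halves around the critical section is avoided.
     */
    static void example_process_path(struct list_head *item)
    {
            spin_lock(&example_lock);
            list_add(item, &example_list);
            spin_unlock(&example_lock);
    }

    /* If a softirq could also take example_lock, process-context callers
     * would instead need spin_lock_bh()/spin_unlock_bh(), so that a bottom
     * half cannot interrupt the lock holder on the same CPU and deadlock
     * trying to acquire the same lock.
     */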
-rw-r--r--  include/linux/sunrpc/svcsock.h |  3
-rw-r--r--  net/sunrpc/svcauth_unix.c      | 10
-rw-r--r--  net/sunrpc/svcsock.c           | 13
3 files changed, 14 insertions(+), 12 deletions(-)
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 7909687557bf..e21dd93ac4b7 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -37,7 +37,8 @@ struct svc_sock {
 
 	atomic_t    	sk_reserved;	/* space on outq that is reserved */
 
-	spinlock_t	sk_defer_lock;	/* protects sk_deferred */
+	spinlock_t	sk_lock;	/* protects sk_deferred and
+					 * sk_info_authunix */
 	struct list_head sk_deferred;	/* deferred requests that need to
 					 * be revisted */
 	struct mutex	sk_mutex;	/* to serialize sending data */
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 2bd23ea2aa8b..07dcd20cbee4 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -385,7 +385,7 @@ ip_map_cached_get(struct svc_rqst *rqstp)
 {
 	struct ip_map *ipm;
 	struct svc_sock *svsk = rqstp->rq_sock;
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	ipm = svsk->sk_info_authunix;
 	if (ipm != NULL) {
 		if (!cache_valid(&ipm->h)) {
@@ -395,13 +395,13 @@ ip_map_cached_get(struct svc_rqst *rqstp)
 			 * same IP address.
 			 */
 			svsk->sk_info_authunix = NULL;
-			spin_unlock_bh(&svsk->sk_defer_lock);
+			spin_unlock(&svsk->sk_lock);
 			cache_put(&ipm->h, &ip_map_cache);
 			return NULL;
 		}
 		cache_get(&ipm->h);
 	}
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	return ipm;
 }
 
@@ -410,14 +410,14 @@ ip_map_cached_put(struct svc_rqst *rqstp, struct ip_map *ipm)
 {
 	struct svc_sock *svsk = rqstp->rq_sock;
 
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	if (svsk->sk_sock->type == SOCK_STREAM &&
 	    svsk->sk_info_authunix == NULL) {
 		/* newly cached, keep the reference */
 		svsk->sk_info_authunix = ipm;
 		ipm = NULL;
 	}
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	if (ipm)
 		cache_put(&ipm->h, &ip_map_cache);
 }
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 22f61aee4824..fdb1386f5dcd 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -53,7 +53,8 @@
  * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
  * when both need to be taken (rare), svc_serv->sv_lock is first.
  * BKL protects svc_serv->sv_nrthread.
- * svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list
+ * svc_sock->sk_lock protects the svc_sock->sk_deferred list
+ * and the ->sk_info_authunix cache.
  * svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
  *
  * Some flags can be set to certain values at any time
@@ -1633,7 +1634,7 @@ static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
 	svsk->sk_server = serv;
 	atomic_set(&svsk->sk_inuse, 1);
 	svsk->sk_lastrecv = get_seconds();
-	spin_lock_init(&svsk->sk_defer_lock);
+	spin_lock_init(&svsk->sk_lock);
 	INIT_LIST_HEAD(&svsk->sk_deferred);
 	INIT_LIST_HEAD(&svsk->sk_ready);
 	mutex_init(&svsk->sk_mutex);
@@ -1857,9 +1858,9 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 	dprintk("revisit queued\n");
 	svsk = dr->svsk;
 	dr->svsk = NULL;
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	list_add(&dr->handle.recent, &svsk->sk_deferred);
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	set_bit(SK_DEFERRED, &svsk->sk_flags);
 	svc_sock_enqueue(svsk);
 	svc_sock_put(svsk);
@@ -1925,7 +1926,7 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
 
 	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
 		return NULL;
-	spin_lock_bh(&svsk->sk_defer_lock);
+	spin_lock(&svsk->sk_lock);
 	clear_bit(SK_DEFERRED, &svsk->sk_flags);
 	if (!list_empty(&svsk->sk_deferred)) {
 		dr = list_entry(svsk->sk_deferred.next,
@@ -1934,6 +1935,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
 		list_del_init(&dr->handle.recent);
 		set_bit(SK_DEFERRED, &svsk->sk_flags);
 	}
-	spin_unlock_bh(&svsk->sk_defer_lock);
+	spin_unlock(&svsk->sk_lock);
 	return dr;
 }