author     Greg Banks <gnb@melbourne.sgi.com>       2006-10-02 05:17:55 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-10-02 10:57:19 -0400
commit     1a68d952af5f43032012d26dd0d5164c9e9986bc
tree       6c660f3ee27205a4c4f18883b2205f873cf396be
parent     c45c357d7dbc9e94338f44349e0035149da86b26

[PATCH] knfsd: use new lock for svc_sock deferred list

Protect the svc_sock->sk_deferred list with a new lock,
svc_sock->sk_defer_lock, instead of svc_serv->sv_lock.  Using the more
fine-grained lock reduces the number of places we need to take the
svc_serv lock.

Signed-off-by: Greg Banks <gnb@melbourne.sgi.com>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
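The change is a textbook case of lock splitting: state that is only ever
reached through one object (here, the per-socket list of deferred requests)
gets a lock embedded in that object, so threads servicing different sockets
stop contending on the shared svc_serv->sv_lock. The sketch below is a
minimal user-space analogue of that pattern, using pthread mutexes in place
of kernel spinlocks; the names conn, defer_push and defer_pop are
hypothetical stand-ins, not kernel API.

/*
 * Sketch of the lock-splitting pattern: each object carries its own
 * lock for its own deferred list, instead of every object sharing one
 * service-wide lock.  User-space analogue with pthreads; the names are
 * hypothetical and this is not kernel code.
 */
#include <pthread.h>
#include <stdio.h>

struct deferred_req {
	struct deferred_req *next;
	int id;
};

struct conn {
	pthread_mutex_t defer_lock;	/* cf. svc_sock->sk_defer_lock */
	struct deferred_req *deferred;	/* cf. svc_sock->sk_deferred */
};

static void conn_init(struct conn *c)
{
	pthread_mutex_init(&c->defer_lock, NULL);	/* cf. spin_lock_init() */
	c->deferred = NULL;
}

/*
 * Requeue a deferred request.  Only this connection's lock is taken,
 * so work on other connections never blocks here.
 */
static void defer_push(struct conn *c, struct deferred_req *dr)
{
	pthread_mutex_lock(&c->defer_lock);
	dr->next = c->deferred;
	c->deferred = dr;
	pthread_mutex_unlock(&c->defer_lock);
}

/* Pop one deferred request, or NULL if the list is empty. */
static struct deferred_req *defer_pop(struct conn *c)
{
	struct deferred_req *dr;

	pthread_mutex_lock(&c->defer_lock);
	dr = c->deferred;
	if (dr)
		c->deferred = dr->next;
	pthread_mutex_unlock(&c->defer_lock);
	return dr;
}

int main(void)
{
	struct conn c;
	struct deferred_req a = { .next = NULL, .id = 1 };

	conn_init(&c);
	defer_push(&c, &a);
	printf("dequeued request %d\n", defer_pop(&c)->id);
	return 0;
}

In the patch itself the same shape appears as the spin_lock_init() call
added in svc_setup_socket() and the spin_lock_bh()/spin_unlock_bh() pairs
switched over in svc_revisit() and svc_deferred_dequeue() below.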
Diffstat (limited to 'net/sunrpc/svcsock.c')
 net/sunrpc/svcsock.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index d836031e4581..bdb5c2841db7 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -47,6 +47,7 @@
 /* SMP locking strategy:
  *
  *	svc_serv->sv_lock protects most stuff for that service.
+ *	svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list
  *
  * Some flags can be set to certain values at any time
  *  providing that certain rules are followed:
@@ -1416,6 +1417,7 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
 	svsk->sk_server = serv;
 	atomic_set(&svsk->sk_inuse, 0);
 	svsk->sk_lastrecv = get_seconds();
+	spin_lock_init(&svsk->sk_defer_lock);
 	INIT_LIST_HEAD(&svsk->sk_deferred);
 	INIT_LIST_HEAD(&svsk->sk_ready);
 	mutex_init(&svsk->sk_mutex);
@@ -1594,7 +1596,6 @@ svc_makesock(struct svc_serv *serv, int protocol, unsigned short port)
 static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 {
 	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
-	struct svc_serv *serv = dreq->owner;
 	struct svc_sock *svsk;
 
 	if (too_many) {
@@ -1605,9 +1606,9 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 	dprintk("revisit queued\n");
 	svsk = dr->svsk;
 	dr->svsk = NULL;
-	spin_lock_bh(&serv->sv_lock);
+	spin_lock_bh(&svsk->sk_defer_lock);
 	list_add(&dr->handle.recent, &svsk->sk_deferred);
-	spin_unlock_bh(&serv->sv_lock);
+	spin_unlock_bh(&svsk->sk_defer_lock);
 	set_bit(SK_DEFERRED, &svsk->sk_flags);
 	svc_sock_enqueue(svsk);
 	svc_sock_put(svsk);
@@ -1667,11 +1668,10 @@ static int svc_deferred_recv(struct svc_rqst *rqstp)
 static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
 {
 	struct svc_deferred_req *dr = NULL;
-	struct svc_serv *serv = svsk->sk_server;
 
 	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
 		return NULL;
-	spin_lock_bh(&serv->sv_lock);
+	spin_lock_bh(&svsk->sk_defer_lock);
 	clear_bit(SK_DEFERRED, &svsk->sk_flags);
 	if (!list_empty(&svsk->sk_deferred)) {
 		dr = list_entry(svsk->sk_deferred.next,
@@ -1680,6 +1680,6 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
 		list_del_init(&dr->handle.recent);
 		set_bit(SK_DEFERRED, &svsk->sk_flags);
 	}
-	spin_unlock_bh(&serv->sv_lock);
+	spin_unlock_bh(&svsk->sk_defer_lock);
 	return dr;
 }
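One subtlety the last two hunks preserve: svc_deferred_dequeue() clears
SK_DEFERRED and then, still holding the lock, re-sets it if entries remain
after removing one, while svc_revisit() sets the bit again after re-queueing
a request. The patch changes only which lock brackets the list accesses, not
this flag handshake, which is why the conversion is mechanical and changes
no behaviour beyond reducing contention on sv_lock.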