author     Greg Banks <gnb@melbourne.sgi.com>       2006-10-02 05:17:54 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>    2006-10-02 10:57:19 -0400
commit     c45c357d7dbc9e94338f44349e0035149da86b26
tree       b04388ccaccd5fb304b4bd8be57258d52e79f886
parent     36bdfc8bae51339aa27ef8e4ce148185293061ae
[PATCH] knfsd: convert sk_inuse to atomic_t
Convert the svc_sock->sk_inuse counter from an int protected by
svc_serv->sv_lock to an atomic_t. This reduces the number of places we
need to take the (effectively global) svc_serv->sv_lock.
Signed-off-by: Greg Banks <gnb@melbourne.sgi.com>
Signed-off-by: Neil Brown <neilb@suse.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r--  include/linux/sunrpc/svcsock.h |  2
-rw-r--r--  net/sunrpc/svcsock.c           | 29
2 files changed, 12 insertions, 19 deletions
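For readers who want the pattern in isolation before reading the diff: the patch replaces "take sv_lock, bump or drop an int, test it, drop sv_lock" with plain atomic operations, where the kernel's atomic_dec_and_test() folds the decrement and the zero test into one indivisible step. Below is a minimal user-space sketch of that reference-count idiom using C11 <stdatomic.h>; it is an illustration only, not kernel code, and every name in it (struct conn, conn_get, conn_put) is invented for the example.

/*
 * Illustrative user-space analogue of the sk_inuse conversion: a use count
 * that used to need "lock; modify; test; unlock" becomes a C11 atomic.
 * Names here (conn, conn_get, conn_put) are invented for this sketch and
 * do not appear in the kernel.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
        atomic_int inuse;       /* plays the role of svc_sock->sk_inuse */
        bool dead;              /* plays the role of the SK_DEAD flag   */
};

static struct conn *conn_new(void)
{
        struct conn *c = malloc(sizeof(*c));
        atomic_init(&c->inuse, 1);      /* creator holds one reference */
        c->dead = false;
        return c;
}

static void conn_get(struct conn *c)
{
        /* before the conversion: lock; c->inuse++; unlock; */
        atomic_fetch_add(&c->inuse, 1);
}

static void conn_put(struct conn *c)
{
        /*
         * atomic_fetch_sub() returns the old value, so "== 1" means this
         * call dropped the count to zero -- the same guarantee the kernel
         * gets from atomic_dec_and_test(), with no lock held.
         */
        if (atomic_fetch_sub(&c->inuse, 1) == 1 && c->dead) {
                printf("releasing dead conn %p\n", (void *)c);
                free(c);
        }
}

int main(void)
{
        struct conn *c = conn_new();

        conn_get(c);            /* e.g. handed to a worker */
        conn_put(c);            /* worker finished */

        c->dead = true;         /* e.g. the socket is being deleted */
        conn_put(c);            /* last reference: freed here */
        return 0;
}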
diff --git a/include/linux/sunrpc/svcsock.h b/include/linux/sunrpc/svcsock.h
index 846aee95eec7..17cb834a748c 100644
--- a/include/linux/sunrpc/svcsock.h
+++ b/include/linux/sunrpc/svcsock.h
@@ -21,7 +21,7 @@ struct svc_sock {
         struct sock *           sk_sk;          /* INET layer */
 
         struct svc_serv *       sk_server;      /* service for this socket */
-        unsigned int            sk_inuse;       /* use count */
+        atomic_t                sk_inuse;       /* use count */
         unsigned long           sk_flags;
 #define SK_BUSY         0                       /* enqueued/receiving */
 #define SK_CONN         1                       /* conn pending */
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 9ba1a071ff06..d836031e4581 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -206,7 +206,7 @@ svc_sock_enqueue(struct svc_sock *svsk)
                         "svc_sock_enqueue: server %p, rq_sock=%p!\n",
                         rqstp, rqstp->rq_sock);
                 rqstp->rq_sock = svsk;
-                svsk->sk_inuse++;
+                atomic_inc(&svsk->sk_inuse);
                 rqstp->rq_reserved = serv->sv_bufsz;
                 svsk->sk_reserved += rqstp->rq_reserved;
                 wake_up(&rqstp->rq_wait);
@@ -235,7 +235,7 @@ svc_sock_dequeue(struct svc_serv *serv)
         list_del_init(&svsk->sk_ready);
 
         dprintk("svc: socket %p dequeued, inuse=%d\n",
-                svsk->sk_sk, svsk->sk_inuse);
+                svsk->sk_sk, atomic_read(&svsk->sk_inuse));
 
         return svsk;
 }
@@ -285,17 +285,11 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
 static inline void
 svc_sock_put(struct svc_sock *svsk)
 {
-        struct svc_serv *serv = svsk->sk_server;
-
-        spin_lock_bh(&serv->sv_lock);
-        if (!--(svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) {
-                spin_unlock_bh(&serv->sv_lock);
+        if (atomic_dec_and_test(&svsk->sk_inuse) && test_bit(SK_DEAD, &svsk->sk_flags)) {
                 dprintk("svc: releasing dead socket\n");
                 sock_release(svsk->sk_sock);
                 kfree(svsk);
         }
-        else
-                spin_unlock_bh(&serv->sv_lock);
 }
 
 static void
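The hunk above is where the conversion pays off: the only reason svc_sock_put() took sv_lock was to make the decrement of sk_inuse and the test for zero one indivisible step, and atomic_dec_and_test() provides exactly that guarantee on its own. Expressed as a hedged user-space helper in C11 atomics (illustrative only, not the kernel's implementation):

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Returns true only for the caller whose decrement took the counter to
 * zero -- the property svc_sock_put() relies on so that exactly one
 * caller performs the final sock_release()/kfree().
 */
static inline bool dec_and_test(atomic_int *v)
{
        return atomic_fetch_sub(v, 1) == 1;
}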
@@ -897,7 +891,7 @@ svc_tcp_accept(struct svc_sock *svsk)
                                       struct svc_sock,
                                       sk_list);
                 set_bit(SK_CLOSE, &svsk->sk_flags);
-                svsk->sk_inuse ++;
+                atomic_inc(&svsk->sk_inuse);
         }
         spin_unlock_bh(&serv->sv_lock);
 
@@ -1229,7 +1223,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
         spin_lock_bh(&serv->sv_lock);
         if ((svsk = svc_sock_dequeue(serv)) != NULL) {
                 rqstp->rq_sock = svsk;
-                svsk->sk_inuse++;
+                atomic_inc(&svsk->sk_inuse);
                 rqstp->rq_reserved = serv->sv_bufsz;
                 svsk->sk_reserved += rqstp->rq_reserved;
         } else {
@@ -1261,7 +1255,7 @@ svc_recv(struct svc_rqst *rqstp, long timeout)
         spin_unlock_bh(&serv->sv_lock);
 
         dprintk("svc: server %p, socket %p, inuse=%d\n",
-                rqstp, svsk, svsk->sk_inuse);
+                rqstp, svsk, atomic_read(&svsk->sk_inuse));
         len = svsk->sk_recvfrom(rqstp);
         dprintk("svc: got len=%d\n", len);
 
@@ -1357,9 +1351,9 @@ svc_age_temp_sockets(unsigned long closure)
 
                 if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
                         continue;
-                if (svsk->sk_inuse || test_bit(SK_BUSY, &svsk->sk_flags))
+                if (atomic_read(&svsk->sk_inuse) || test_bit(SK_BUSY, &svsk->sk_flags))
                         continue;
-                svsk->sk_inuse++;
+                atomic_inc(&svsk->sk_inuse);
                 list_move(le, &to_be_aged);
                 set_bit(SK_CLOSE, &svsk->sk_flags);
                 set_bit(SK_DETACHED, &svsk->sk_flags);
@@ -1420,6 +1414,7 @@ svc_setup_socket(struct svc_serv *serv, struct socket *sock,
         svsk->sk_odata = inet->sk_data_ready;
         svsk->sk_owspace = inet->sk_write_space;
         svsk->sk_server = serv;
+        atomic_set(&svsk->sk_inuse, 0);
         svsk->sk_lastrecv = get_seconds();
         INIT_LIST_HEAD(&svsk->sk_deferred);
         INIT_LIST_HEAD(&svsk->sk_ready);
@@ -1563,7 +1558,7 @@ svc_delete_socket(struct svc_sock *svsk)
         if (test_bit(SK_TEMP, &svsk->sk_flags))
                 serv->sv_tmpcnt--;
 
-        if (!svsk->sk_inuse) {
+        if (!atomic_read(&svsk->sk_inuse)) {
                 spin_unlock_bh(&serv->sv_lock);
                 if (svsk->sk_sock->file)
                         sockfd_put(svsk->sk_sock);
@@ -1644,10 +1639,8 @@ svc_defer(struct cache_req *req)
                 dr->argslen = rqstp->rq_arg.len >> 2;
                 memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
         }
-        spin_lock_bh(&rqstp->rq_server->sv_lock);
-        rqstp->rq_sock->sk_inuse++;
+        atomic_inc(&rqstp->rq_sock->sk_inuse);
         dr->svsk = rqstp->rq_sock;
-        spin_unlock_bh(&rqstp->rq_server->sv_lock);
 
         dr->handle.revisit = svc_revisit;
         return &dr->handle;