aboutsummaryrefslogtreecommitdiffstats
path: root/net
diff options
context:
space:
mode:
authorGreg Banks <gnb@melbourne.sgi.com>2006-10-02 05:17:57 -0400
committerLinus Torvalds <torvalds@g5.osdl.org>2006-10-02 10:57:19 -0400
commitc081a0c7cfe42adf8e8b9c2b8d0b2ec7f47603e8 (patch)
treeb8be50680c576427f29195946332bacd56eb1055 /net
parent5685f0fa1c24b138d041ef129ed419c5effa40e1 (diff)
[PATCH] knfsd: test and set SK_BUSY atomically
The SK_BUSY bit in svc_sock->sk_flags ensures that we do not attempt to enqueue a socket twice. Currently, setting and clearing the bit is protected by svc_serv->sv_lock. As I intend to reduce the data that the lock protects so it's not held when svc_sock_enqueue() tests and sets SK_BUSY, that test and set needs to be atomic. Signed-off-by: Greg Banks <gnb@melbourne.sgi.com> Signed-off-by: Neil Brown <neilb@suse.de> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'net')
-rw-r--r--net/sunrpc/svcsock.c20
1 file changed, 10 insertions, 10 deletions
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 88b51c4ecb8b..a38df4589ae9 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -46,14 +46,13 @@
46 46
47/* SMP locking strategy: 47/* SMP locking strategy:
48 * 48 *
49 * svc_serv->sv_lock protects most stuff for that service. 49 * svc_serv->sv_lock protects most stuff for that service.
50 * svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list 50 * svc_sock->sk_defer_lock protects the svc_sock->sk_deferred list
51 * svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
51 * 52 *
52 * Some flags can be set to certain values at any time 53 * Some flags can be set to certain values at any time
53 * providing that certain rules are followed: 54 * providing that certain rules are followed:
54 * 55 *
55 * SK_BUSY can be set to 0 at any time.
56 * svc_sock_enqueue must be called afterwards
57 * SK_CONN, SK_DATA, can be set or cleared at any time. 56 * SK_CONN, SK_DATA, can be set or cleared at any time.
58 * after a set, svc_sock_enqueue must be called. 57 * after a set, svc_sock_enqueue must be called.
59 * after a clear, the socket must be read/accepted 58 * after a clear, the socket must be read/accepted
@@ -170,8 +169,13 @@ svc_sock_enqueue(struct svc_sock *svsk)
170 goto out_unlock; 169 goto out_unlock;
171 } 170 }
172 171
173 if (test_bit(SK_BUSY, &svsk->sk_flags)) { 172 /* Mark socket as busy. It will remain in this state until the
174 /* Don't enqueue socket while daemon is receiving */ 173 * server has processed all pending data and put the socket back
174 * on the idle list. We update SK_BUSY atomically because
175 * it also guards against trying to enqueue the svc_sock twice.
176 */
177 if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
178 /* Don't enqueue socket while already enqueued */
175 dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk); 179 dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
176 goto out_unlock; 180 goto out_unlock;
177 } 181 }
@@ -185,15 +189,11 @@ svc_sock_enqueue(struct svc_sock *svsk)
185 dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n", 189 dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
186 svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_bufsz, 190 svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_bufsz,
187 svc_sock_wspace(svsk)); 191 svc_sock_wspace(svsk));
192 clear_bit(SK_BUSY, &svsk->sk_flags);
188 goto out_unlock; 193 goto out_unlock;
189 } 194 }
190 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags); 195 clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
191 196
192 /* Mark socket as busy. It will remain in this state until the
193 * server has processed all pending data and put the socket back
194 * on the idle list.
195 */
196 set_bit(SK_BUSY, &svsk->sk_flags);
197 197
198 if (!list_empty(&serv->sv_threads)) { 198 if (!list_empty(&serv->sv_threads)) {
199 rqstp = list_entry(serv->sv_threads.next, 199 rqstp = list_entry(serv->sv_threads.next,