diff options
| author | J. Bruce Fields <bfields@redhat.com> | 2010-10-26 11:32:03 -0400 |
|---|---|---|
| committer | J. Bruce Fields <bfields@redhat.com> | 2010-11-19 18:35:12 -0500 |
| commit | 9c335c0b8daf56b9f73479d00b1dd726e1fcca09 (patch) | |
| tree | 1b408ebd50345d5f3c699a2cd14c73248f39a29d /net | |
| parent | b176331627fccc726d28f4fc4a357d1f3c19dbf0 (diff) | |
svcrpc: fix wspace-checking race
We call svc_xprt_enqueue() after something happens which we think may
require handling from a server thread. To avoid such events being lost,
svc_xprt_enqueue() must guarantee that there will be a svc_recv() call
from a server thread following any such event. It does that by either
waking up a server thread itself, or checking that XPT_BUSY is set (in
which case somebody else is doing it).
But the check of XPT_BUSY could occur just as someone finishes
processing some other event, and just before they clear XPT_BUSY.
Therefore it's important not to clear XPT_BUSY without subsequently
doing another svc_xprt_enqueue() to check whether the xprt should be
requeued.
The xpo_wspace() check in svc_xprt_enqueue() breaks this rule, allowing
an event to be missed in situations like:
data arrives
call svc_tcp_data_ready():
call svc_xprt_enqueue():
set BUSY
find no write space
svc_reserve():
free up write space
call svc_xprt_enqueue():
test BUSY
clear BUSY
So, instead, check wspace in the same places that the state flags are
checked: before taking BUSY, and in svc_recv().
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net')
| -rw-r--r-- | net/sunrpc/svc_xprt.c | 33 |
1 files changed, 11 insertions, 22 deletions
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index bfbda676574..b6f47da170b 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
| @@ -302,6 +302,15 @@ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) | |||
| 302 | list_del(&rqstp->rq_list); | 302 | list_del(&rqstp->rq_list); |
| 303 | } | 303 | } |
| 304 | 304 | ||
| 305 | static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) | ||
| 306 | { | ||
| 307 | if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE))) | ||
| 308 | return true; | ||
| 309 | if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) | ||
| 310 | return xprt->xpt_ops->xpo_has_wspace(xprt); | ||
| 311 | return false; | ||
| 312 | } | ||
| 313 | |||
| 305 | /* | 314 | /* |
| 306 | * Queue up a transport with data pending. If there are idle nfsd | 315 | * Queue up a transport with data pending. If there are idle nfsd |
| 307 | * processes, wake 'em up. | 316 | * processes, wake 'em up. |
| @@ -314,8 +323,7 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
| 314 | struct svc_rqst *rqstp; | 323 | struct svc_rqst *rqstp; |
| 315 | int cpu; | 324 | int cpu; |
| 316 | 325 | ||
| 317 | if (!(xprt->xpt_flags & | 326 | if (!svc_xprt_has_something_to_do(xprt)) |
| 318 | ((1<<XPT_CONN)|(1<<XPT_DATA)|(1<<XPT_CLOSE)|(1<<XPT_DEFERRED)))) | ||
| 319 | return; | 327 | return; |
| 320 | 328 | ||
| 321 | cpu = get_cpu(); | 329 | cpu = get_cpu(); |
| @@ -345,25 +353,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
| 345 | BUG_ON(xprt->xpt_pool != NULL); | 353 | BUG_ON(xprt->xpt_pool != NULL); |
| 346 | xprt->xpt_pool = pool; | 354 | xprt->xpt_pool = pool; |
| 347 | 355 | ||
| 348 | /* Handle pending connection */ | ||
| 349 | if (test_bit(XPT_CONN, &xprt->xpt_flags)) | ||
| 350 | goto process; | ||
| 351 | |||
| 352 | /* Handle close in-progress */ | ||
| 353 | if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) | ||
| 354 | goto process; | ||
| 355 | |||
| 356 | /* Check if we have space to reply to a request */ | ||
| 357 | if (!xprt->xpt_ops->xpo_has_wspace(xprt)) { | ||
| 358 | /* Don't enqueue while not enough space for reply */ | ||
| 359 | dprintk("svc: no write space, transport %p not enqueued\n", | ||
| 360 | xprt); | ||
| 361 | xprt->xpt_pool = NULL; | ||
| 362 | clear_bit(XPT_BUSY, &xprt->xpt_flags); | ||
| 363 | goto out_unlock; | ||
| 364 | } | ||
| 365 | |||
| 366 | process: | ||
| 367 | if (!list_empty(&pool->sp_threads)) { | 356 | if (!list_empty(&pool->sp_threads)) { |
| 368 | rqstp = list_entry(pool->sp_threads.next, | 357 | rqstp = list_entry(pool->sp_threads.next, |
| 369 | struct svc_rqst, | 358 | struct svc_rqst, |
| @@ -744,7 +733,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout) | |||
| 744 | spin_unlock_bh(&serv->sv_lock); | 733 | spin_unlock_bh(&serv->sv_lock); |
| 745 | svc_xprt_received(newxpt); | 734 | svc_xprt_received(newxpt); |
| 746 | } | 735 | } |
| 747 | } else { | 736 | } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { |
| 748 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", | 737 | dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", |
| 749 | rqstp, pool->sp_id, xprt, | 738 | rqstp, pool->sp_id, xprt, |
| 750 | atomic_read(&xprt->xpt_ref.refcount)); | 739 | atomic_read(&xprt->xpt_ref.refcount)); |
