diff options
author | NeilBrown <neilb@suse.de> | 2010-11-14 19:27:01 -0500 |
---|---|---|
committer | J. Bruce Fields <bfields@redhat.com> | 2010-12-17 15:48:18 -0500 |
commit | 7c96aef75949a56ec427fc6a2522dace2af33605 (patch) | |
tree | ca65368231d4c5b39e3adce558bfe15d712809c7 /net/sunrpc | |
parent | 18b631f83810e95eeb2e1839889b27142bd8d6d8 (diff) |
sunrpc: remove xpt_pool
The xpt_pool field is only used for reporting BUGs.
And it isn't used correctly.
In particular, when it is cleared in svc_xprt_received before
XPT_BUSY is cleared, there is no guarantee that the compiler
or the CPU will not re-order the two assignments, effectively
setting xpt_pool to NULL only after XPT_BUSY is cleared.
If a different CPU were running svc_xprt_enqueue at that moment,
it could see XPT_BUSY clear while xpt_pool is still non-NULL, and
so trigger the BUG.
This could be fixed by calling
smp_mb__before_clear_bit()
before the clear_bit. However as xpt_pool isn't really used,
it seems safest to simply remove xpt_pool.
Another alternative would be to change the clear_bit to
clear_bit_unlock, and the test_and_set_bit to test_and_set_bit_lock.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net/sunrpc')
-rw-r--r-- | net/sunrpc/svc_xprt.c | 5 |
1 files changed, 0 insertions, 5 deletions
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index 5a75d23645c8..5eae53b1e306 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -351,8 +351,6 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
351 | dprintk("svc: transport %p busy, not enqueued\n", xprt); | 351 | dprintk("svc: transport %p busy, not enqueued\n", xprt); |
352 | goto out_unlock; | 352 | goto out_unlock; |
353 | } | 353 | } |
354 | BUG_ON(xprt->xpt_pool != NULL); | ||
355 | xprt->xpt_pool = pool; | ||
356 | 354 | ||
357 | if (!list_empty(&pool->sp_threads)) { | 355 | if (!list_empty(&pool->sp_threads)) { |
358 | rqstp = list_entry(pool->sp_threads.next, | 356 | rqstp = list_entry(pool->sp_threads.next, |
@@ -370,13 +368,11 @@ void svc_xprt_enqueue(struct svc_xprt *xprt) | |||
370 | rqstp->rq_reserved = serv->sv_max_mesg; | 368 | rqstp->rq_reserved = serv->sv_max_mesg; |
371 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); | 369 | atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); |
372 | pool->sp_stats.threads_woken++; | 370 | pool->sp_stats.threads_woken++; |
373 | BUG_ON(xprt->xpt_pool != pool); | ||
374 | wake_up(&rqstp->rq_wait); | 371 | wake_up(&rqstp->rq_wait); |
375 | } else { | 372 | } else { |
376 | dprintk("svc: transport %p put into queue\n", xprt); | 373 | dprintk("svc: transport %p put into queue\n", xprt); |
377 | list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); | 374 | list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); |
378 | pool->sp_stats.sockets_queued++; | 375 | pool->sp_stats.sockets_queued++; |
379 | BUG_ON(xprt->xpt_pool != pool); | ||
380 | } | 376 | } |
381 | 377 | ||
382 | out_unlock: | 378 | out_unlock: |
@@ -415,7 +411,6 @@ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) | |||
415 | void svc_xprt_received(struct svc_xprt *xprt) | 411 | void svc_xprt_received(struct svc_xprt *xprt) |
416 | { | 412 | { |
417 | BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); | 413 | BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); |
418 | xprt->xpt_pool = NULL; | ||
419 | /* As soon as we clear busy, the xprt could be closed and | 414 | /* As soon as we clear busy, the xprt could be closed and |
420 | * 'put', so we need a reference to call svc_xprt_enqueue with: | 415 | * 'put', so we need a reference to call svc_xprt_enqueue with: |
421 | */ | 416 | */ |