diff options
author | J. Bruce Fields <bfields@redhat.com> | 2019-01-11 15:36:40 -0500 |
---|---|---|
committer | J. Bruce Fields <bfields@redhat.com> | 2019-02-06 15:37:14 -0500 |
commit | 95503d295ad6af20f09efff193e085481a962fd2 (patch) | |
tree | a580f60a4a517c73575705c1745de208840f1c43 /net/sunrpc/svc_xprt.c | |
parent | 66c898caefd346a88fbef242eb7892fd959308f6 (diff) |
svcrpc: fix unlikely races preventing queueing of sockets
In the rpc server, when something happens that might be reason to wake
up a thread to do something, what we do is
- modify xpt_flags, sk_sock->flags, xpt_reserved, or
xpt_nr_rqsts to indicate the new situation
- call svc_xprt_enqueue() to decide whether to wake up a thread.
svc_xprt_enqueue may require multiple conditions to be true before
queueing up a thread to handle the xprt. In the SMP case, one of the
other CPUs may have set another required condition, and in that case,
although both CPUs run svc_xprt_enqueue(), it's possible that neither
call sees the writes done by the other CPU in time, and neither one
recognizes that all the required conditions have been set. A socket
could therefore be ignored indefinitely.
Add memory barriers to ensure that any svc_xprt_enqueue() call will
always see the conditions changed by other CPUs before deciding to
ignore a socket.
I've never seen this race reported. In the unlikely event it happens,
another event will usually come along and the problem will fix itself.
So I don't think this is worth backporting to stable.
Chuck tried this patch and said "I don't see any performance
regressions, but my server has only a single last-level CPU cache."
Tested-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
Diffstat (limited to 'net/sunrpc/svc_xprt.c')
-rw-r--r-- | net/sunrpc/svc_xprt.c | 12 |
1 file changed, 11 insertions, 1 deletion
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c index a2435d3811a9..61530b1b7754 100644 --- a/net/sunrpc/svc_xprt.c +++ b/net/sunrpc/svc_xprt.c | |||
@@ -357,6 +357,7 @@ static void svc_xprt_release_slot(struct svc_rqst *rqstp) | |||
357 | struct svc_xprt *xprt = rqstp->rq_xprt; | 357 | struct svc_xprt *xprt = rqstp->rq_xprt; |
358 | if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) { | 358 | if (test_and_clear_bit(RQ_DATA, &rqstp->rq_flags)) { |
359 | atomic_dec(&xprt->xpt_nr_rqsts); | 359 | atomic_dec(&xprt->xpt_nr_rqsts); |
360 | smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */ | ||
360 | svc_xprt_enqueue(xprt); | 361 | svc_xprt_enqueue(xprt); |
361 | } | 362 | } |
362 | } | 363 | } |
@@ -365,6 +366,15 @@ static bool svc_xprt_ready(struct svc_xprt *xprt) | |||
365 | { | 366 | { |
366 | unsigned long xpt_flags; | 367 | unsigned long xpt_flags; |
367 | 368 | ||
369 | /* | ||
370 | * If another cpu has recently updated xpt_flags, | ||
371 | * sk_sock->flags, xpt_reserved, or xpt_nr_rqsts, we need to | ||
372 | * know about it; otherwise it's possible that both that cpu and | ||
373 | * this one could call svc_xprt_enqueue() without either | ||
374 | * svc_xprt_enqueue() recognizing that the conditions below | ||
375 | * are satisfied, and we could stall indefinitely: | ||
376 | */ | ||
377 | smp_rmb(); | ||
368 | xpt_flags = READ_ONCE(xprt->xpt_flags); | 378 | xpt_flags = READ_ONCE(xprt->xpt_flags); |
369 | 379 | ||
370 | if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE))) | 380 | if (xpt_flags & (BIT(XPT_CONN) | BIT(XPT_CLOSE))) |
@@ -479,7 +489,7 @@ void svc_reserve(struct svc_rqst *rqstp, int space) | |||
479 | if (xprt && space < rqstp->rq_reserved) { | 489 | if (xprt && space < rqstp->rq_reserved) { |
480 | atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved); | 490 | atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved); |
481 | rqstp->rq_reserved = space; | 491 | rqstp->rq_reserved = space; |
482 | 492 | smp_wmb(); /* See smp_rmb() in svc_xprt_ready() */ | |
483 | svc_xprt_enqueue(xprt); | 493 | svc_xprt_enqueue(xprt); |
484 | } | 494 | } |
485 | } | 495 | } |