diff options
author     Tom Tucker <tom@opengridcomputing.com>    2009-01-05 16:21:19 -0500
committer  J. Bruce Fields <bfields@citi.umich.edu>  2009-01-07 17:08:46 -0500
commit     22945e4a1c7454c97f5d8aee1ef526c83fef3223 (patch)
tree       f082143420da55b97c98a1534336b0cf03412e0b /net/sunrpc
parent     9a8d248e2d2e9c880ac4561f27fea5dc200655bd (diff)
svc: Clean up deferred requests on transport destruction
A race between svc_revisit and svc_delete_xprt can result in
deferred requests holding references on a transport that can never be
recovered because dead transports are not enqueued for subsequent
processing.
Check for XPT_DEAD in revisit to clean up completing deferrals on a dead
transport and sweep a transport's deferred queue to do the same for queued
but unprocessed deferrals.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
Signed-off-by: J. Bruce Fields <bfields@citi.umich.edu>
Diffstat (limited to 'net/sunrpc')
 net/sunrpc/svc_xprt.c | 25 ++++++++++++++++---------
 1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 29619612b9f1..a78b87937c73 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -850,6 +850,11 @@ static void svc_age_temp_xprts(unsigned long closure)
 void svc_delete_xprt(struct svc_xprt *xprt)
 {
 	struct svc_serv *serv = xprt->xpt_server;
+	struct svc_deferred_req *dr;
+
+	/* Only do this once */
+	if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags))
+		return;
 
 	dprintk("svc: svc_delete_xprt(%p)\n", xprt);
 	xprt->xpt_ops->xpo_detach(xprt);
@@ -864,12 +869,16 @@ void svc_delete_xprt(struct svc_xprt *xprt)
 	 * while still attached to a queue, the queue itself
 	 * is about to be destroyed (in svc_destroy).
 	 */
-	if (!test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) {
-		BUG_ON(atomic_read(&xprt->xpt_ref.refcount) < 2);
-		if (test_bit(XPT_TEMP, &xprt->xpt_flags))
-			serv->sv_tmpcnt--;
+	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
+		serv->sv_tmpcnt--;
+
+	for (dr = svc_deferred_dequeue(xprt); dr;
+	     dr = svc_deferred_dequeue(xprt)) {
 		svc_xprt_put(xprt);
-	}
+		kfree(dr);
+	}
+
+	svc_xprt_put(xprt);
 	spin_unlock_bh(&serv->sv_lock);
 }
 
@@ -915,17 +924,19 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
 		container_of(dreq, struct svc_deferred_req, handle);
 	struct svc_xprt *xprt = dr->xprt;
 
-	if (too_many) {
+	spin_lock(&xprt->xpt_lock);
+	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
+	if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
+		spin_unlock(&xprt->xpt_lock);
+		dprintk("revisit canceled\n");
 		svc_xprt_put(xprt);
 		kfree(dr);
 		return;
 	}
 	dprintk("revisit queued\n");
 	dr->xprt = NULL;
-	spin_lock(&xprt->xpt_lock);
 	list_add(&dr->handle.recent, &xprt->xpt_deferred);
 	spin_unlock(&xprt->xpt_lock);
-	set_bit(XPT_DEFERRED, &xprt->xpt_flags);
 	svc_xprt_enqueue(xprt);
 	svc_xprt_put(xprt);
 }