diff options
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/svc.c	| 10
-rw-r--r--	net/sunrpc/svc_xprt.c	| 48
2 files changed, 38 insertions, 20 deletions
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 60babf0a9847..1a6c16ed7fa6 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -527,7 +527,15 @@ svc_destroy(struct svc_serv *serv)
 	printk("svc_destroy: no threads for serv=%p!\n", serv);
 
 	del_timer_sync(&serv->sv_temptimer);
+	/*
+	 * The set of xprts (contained in the sv_tempsocks and
+	 * sv_permsocks lists) is now constant, since it is modified
+	 * only by accepting new sockets (done by service threads in
+	 * svc_recv) or aging old ones (done by sv_temptimer), or
+	 * configuration changes (excluded by whatever locking the
+	 * caller is using--nfsd_mutex in the case of nfsd).  So it's
+	 * safe to traverse those lists and shut everything down:
+	 */
 	svc_close_all(serv);
 
 	if (serv->sv_shutdown)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 099ddf99d2a1..0d80c064e634 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -894,14 +894,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
 	spin_lock_bh(&serv->sv_lock);
 	if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
 		list_del_init(&xprt->xpt_list);
-	/*
-	 * The only time we're called while xpt_ready is still on a list
-	 * is while the list itself is about to be destroyed (in
-	 * svc_destroy).  BUT svc_xprt_enqueue could still be attempting
-	 * to add new entries to the sp_sockets list, so we can't leave
-	 * a freed xprt on it.
-	 */
-	list_del_init(&xprt->xpt_ready);
+	BUG_ON(!list_empty(&xprt->xpt_ready));
 	if (test_bit(XPT_TEMP, &xprt->xpt_flags))
 		serv->sv_tmpcnt--;
 	spin_unlock_bh(&serv->sv_lock);
@@ -932,28 +925,45 @@ EXPORT_SYMBOL_GPL(svc_close_xprt);
 static void svc_close_list(struct list_head *xprt_list)
 {
 	struct svc_xprt *xprt;
-	struct svc_xprt *tmp;
 
-	/*
-	 * The server is shutting down, and no more threads are running.
-	 * svc_xprt_enqueue() might still be running, but at worst it
-	 * will re-add the xprt to sp_sockets, which will soon get
-	 * freed.  So we don't bother with any more locking, and don't
-	 * leave the close to the (nonexistent) server threads:
-	 */
-	list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
+	list_for_each_entry(xprt, xprt_list, xpt_list) {
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
-		svc_delete_xprt(xprt);
+		set_bit(XPT_BUSY, &xprt->xpt_flags);
 	}
 }
 
 void svc_close_all(struct svc_serv *serv)
 {
+	struct svc_pool *pool;
+	struct svc_xprt *xprt;
+	struct svc_xprt *tmp;
+	int i;
+
 	svc_close_list(&serv->sv_tempsocks);
 	svc_close_list(&serv->sv_permsocks);
+
+	for (i = 0; i < serv->sv_nrpools; i++) {
+		pool = &serv->sv_pools[i];
+
+		spin_lock_bh(&pool->sp_lock);
+		while (!list_empty(&pool->sp_sockets)) {
+			xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
+			list_del_init(&xprt->xpt_ready);
+		}
+		spin_unlock_bh(&pool->sp_lock);
+	}
+	/*
+	 * At this point the sp_sockets lists will stay empty, since
+	 * svc_enqueue will not add new entries without taking the
+	 * sp_lock and checking XPT_BUSY.
+	 */
+	list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list)
+		svc_delete_xprt(xprt);
+	list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
+		svc_delete_xprt(xprt);
+
 	BUG_ON(!list_empty(&serv->sv_permsocks));
 	BUG_ON(!list_empty(&serv->sv_tempsocks));
-
 }
 
 /*