Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/svc.c       |  9
-rw-r--r--  net/sunrpc/svc_xprt.c  | 57
2 files changed, 32 insertions, 34 deletions
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index b9ba2a8c1c19..89a588b4478b 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -515,15 +515,6 @@ EXPORT_SYMBOL_GPL(svc_create_pooled);
 
 void svc_shutdown_net(struct svc_serv *serv, struct net *net)
 {
-        /*
-         * The set of xprts (contained in the sv_tempsocks and
-         * sv_permsocks lists) is now constant, since it is modified
-         * only by accepting new sockets (done by service threads in
-         * svc_recv) or aging old ones (done by sv_temptimer), or
-         * configuration changes (excluded by whatever locking the
-         * caller is using--nfsd_mutex in the case of nfsd).  So it's
-         * safe to traverse those lists and shut everything down:
-         */
         svc_close_net(serv, net);
 
         if (serv->sv_shutdown)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 11a33c874848..80a6640f329b 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -955,21 +955,24 @@ void svc_close_xprt(struct svc_xprt *xprt)
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
 
-static void svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+static int svc_close_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
 {
         struct svc_xprt *xprt;
+        int ret = 0;
 
         spin_lock(&serv->sv_lock);
         list_for_each_entry(xprt, xprt_list, xpt_list) {
                 if (xprt->xpt_net != net)
                         continue;
+                ret++;
                 set_bit(XPT_CLOSE, &xprt->xpt_flags);
-                set_bit(XPT_BUSY, &xprt->xpt_flags);
+                svc_xprt_enqueue(xprt);
         }
         spin_unlock(&serv->sv_lock);
+        return ret;
 }
 
-static void svc_clear_pools(struct svc_serv *serv, struct net *net)
+static struct svc_xprt *svc_dequeue_net(struct svc_serv *serv, struct net *net)
 {
         struct svc_pool *pool;
         struct svc_xprt *xprt;
@@ -984,42 +987,46 @@ static void svc_clear_pools(struct svc_serv *serv, struct net *net)
                         if (xprt->xpt_net != net)
                                 continue;
                         list_del_init(&xprt->xpt_ready);
+                        spin_unlock_bh(&pool->sp_lock);
+                        return xprt;
                 }
                 spin_unlock_bh(&pool->sp_lock);
         }
+        return NULL;
 }
 
-static void svc_clear_list(struct svc_serv *serv, struct list_head *xprt_list, struct net *net)
+static void svc_clean_up_xprts(struct svc_serv *serv, struct net *net)
 {
         struct svc_xprt *xprt;
-        struct svc_xprt *tmp;
-        LIST_HEAD(victims);
-
-        spin_lock(&serv->sv_lock);
-        list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
-                if (xprt->xpt_net != net)
-                        continue;
-                list_move(&xprt->xpt_list, &victims);
-        }
-        spin_unlock(&serv->sv_lock);
 
-        list_for_each_entry_safe(xprt, tmp, &victims, xpt_list)
+        while ((xprt = svc_dequeue_net(serv, net))) {
+                set_bit(XPT_CLOSE, &xprt->xpt_flags);
                 svc_delete_xprt(xprt);
+        }
 }
 
+/*
+ * Server threads may still be running (especially in the case where the
+ * service is still running in other network namespaces).
+ *
+ * So we shut down sockets the same way we would on a running server, by
+ * setting XPT_CLOSE, enqueuing, and letting a thread pick it up to do
+ * the close.  In the case there are no such other
+ * threads running, svc_clean_up_xprts() does a simple version of a
+ * server's main event loop, and in the case where there are other
+ * threads, we may need to wait a little while and then check again to
+ * see if they're done.
+ */
 void svc_close_net(struct svc_serv *serv, struct net *net)
 {
-        svc_close_list(serv, &serv->sv_tempsocks, net);
-        svc_close_list(serv, &serv->sv_permsocks, net);
+        int delay = 0;
 
-        svc_clear_pools(serv, net);
-        /*
-         * At this point the sp_sockets lists will stay empty, since
-         * svc_xprt_enqueue will not add new entries without taking the
-         * sp_lock and checking XPT_BUSY.
-         */
-        svc_clear_list(serv, &serv->sv_tempsocks, net);
-        svc_clear_list(serv, &serv->sv_permsocks, net);
+        while (svc_close_list(serv, &serv->sv_permsocks, net) +
+               svc_close_list(serv, &serv->sv_tempsocks, net)) {
+
+                svc_clean_up_xprts(serv, net);
+                msleep(delay++);
+        }
 }
 
 /*
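For readers tracing the new control flow, the following is a minimal, self-contained user-space sketch of the retry loop described in the comment added above. The structure and helper names (fake_xprt, mark_for_close, clean_up) are invented stand-ins rather than the SUNRPC API; only the loop shape (mark what is left, clean up what we can ourselves, sleep with a growing delay, re-check) mirrors the patch.

/*
 * Minimal user-space sketch (not kernel code) of the shutdown pattern the
 * new svc_close_net() uses: mark the remaining transports for close, clean
 * up whatever this caller can handle itself, back off briefly so concurrent
 * worker threads can finish the rest, then re-check.  All names below are
 * invented for illustration.
 */
#include <stdio.h>
#include <unistd.h>

struct fake_xprt {
        int net;                /* namespace this transport belongs to */
        int close_pending;      /* analogue of the XPT_CLOSE bit */
        int closed;             /* already torn down? */
};

static struct fake_xprt xprts[] = {
        { .net = 1 }, { .net = 2 }, { .net = 1 },
};

#define NR_XPRTS (sizeof(xprts) / sizeof(xprts[0]))

/* Rough analogue of svc_close_list(): flag matching transports, return count. */
static int mark_for_close(int net)
{
        unsigned int i;
        int found = 0;

        for (i = 0; i < NR_XPRTS; i++) {
                if (xprts[i].net != net || xprts[i].closed)
                        continue;
                xprts[i].close_pending = 1;     /* kernel: set XPT_CLOSE, enqueue */
                found++;
        }
        return found;
}

/* Rough analogue of svc_clean_up_xprts(): do the close work ourselves. */
static void clean_up(int net)
{
        unsigned int i;

        for (i = 0; i < NR_XPRTS; i++)
                if (xprts[i].net == net && xprts[i].close_pending)
                        xprts[i].closed = 1;    /* kernel: svc_delete_xprt() */
}

int main(void)
{
        int delay = 0, net = 1;

        /* Same shape as the new svc_close_net(): retry until nothing is left. */
        while (mark_for_close(net)) {
                clean_up(net);
                usleep(1000 * (useconds_t)delay++);     /* kernel: msleep(delay++) */
        }
        printf("namespace %d: all transports closed\n", net);
        return 0;
}

In the kernel, iterations after the first matter only when other service threads are still consuming the enqueued transports; the sketch collapses that to a single pass.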