author    Linus Torvalds <torvalds@linux-foundation.org>  2012-01-14 15:26:41 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-01-14 15:26:41 -0500
commit    0b48d42235caf627121f440b57d376f48a9af8b6 (patch)
tree      400967c5fcb1cd08bbc0e1739e229f9717590f19 /net
parent    8e63dd6e1c589ba99a18df9cbaa41c3178607641 (diff)
parent    7a6ef8c72314f254c107c6a9ed7cb201961ee05a (diff)
Merge branch 'for-3.3' of git://linux-nfs.org/~bfields/linux
* 'for-3.3' of git://linux-nfs.org/~bfields/linux: (31 commits)
nfsd4: nfsd4_create_clid_dir return value is unused
NFSD: Change name of extended attribute containing junction
svcrpc: don't revert to SVC_POOL_DEFAULT on nfsd shutdown
svcrpc: fix double-free on shutdown of nfsd after changing pool mode
nfsd4: be forgiving in the absence of the recovery directory
nfsd4: fix spurious 4.1 post-reboot failures
NFSD: forget_delegations should use list_for_each_entry_safe
NFSD: Only reinitialize the recall_lru list under the recall lock
nfsd4: initialize special stateid's at compile time
NFSd: use network-namespace-aware cache registering routines
SUNRPC: create svc_xprt in proper network namespace
svcrpc: update outdated BKL comment
nfsd41: allow non-reclaim open-by-fh's in 4.1
svcrpc: avoid memory-corruption on pool shutdown
svcrpc: destroy server sockets all at once
svcrpc: make svc_delete_xprt static
nfsd: Fix oops when parsing a 0 length export
nfsd4: Use kmemdup rather than duplicating its implementation
nfsd4: add a separate (lockowner, inode) lookup
nfsd4: fix CONFIG_NFSD_FAULT_INJECTION compile error
...
Diffstat (limited to 'net')
-rw-r--r--  net/sunrpc/cache.c                        |  2
-rw-r--r--  net/sunrpc/svc.c                          | 25
-rw-r--r--  net/sunrpc/svc_xprt.c                     | 62
-rw-r--r--  net/sunrpc/svcsock.c                      |  8
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c  |  2
5 files changed, 64 insertions, 35 deletions
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 03b56bc3b659..465df9ae1046 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1641,6 +1641,7 @@ int cache_register_net(struct cache_detail *cd, struct net *net)
         sunrpc_destroy_cache_detail(cd);
         return ret;
 }
+EXPORT_SYMBOL_GPL(cache_register_net);
 
 int cache_register(struct cache_detail *cd)
 {
@@ -1653,6 +1654,7 @@ void cache_unregister_net(struct cache_detail *cd, struct net *net)
         remove_cache_proc_entries(cd, net);
         sunrpc_destroy_cache_detail(cd);
 }
+EXPORT_SYMBOL_GPL(cache_unregister_net);
 
 void cache_unregister(struct cache_detail *cd)
 {
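These two exports are what the shortlog entry "NFSd: use network-namespace-aware cache registering routines" builds on: a module can now register a cache's proc entries per network namespace instead of only in init_net. A minimal sketch of the intended calling pattern, assuming the usual pernet_operations hookup; the example_cache detail and all example_* names are hypothetical, not part of this patch:

#include <linux/module.h>
#include <linux/sunrpc/cache.h>
#include <net/net_namespace.h>

static struct cache_detail example_cache;      /* hypothetical cache; real fields omitted */

static __net_init int example_net_init(struct net *net)
{
        /* create this namespace's proc entries for the cache */
        return cache_register_net(&example_cache, net);
}

static __net_exit void example_net_exit(struct net *net)
{
        /* tear down the same namespace's entries */
        cache_unregister_net(&example_cache, net);
}

static struct pernet_operations example_net_ops = {
        .init   = example_net_init,
        .exit   = example_net_exit,
};

static int __init example_init(void)
{
        return register_pernet_subsys(&example_net_ops);
}

static void __exit example_exit(void)
{
        unregister_pernet_subsys(&example_net_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");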
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 9d01d46b05f3..e4aabc02368b 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -167,6 +167,7 @@ svc_pool_map_alloc_arrays(struct svc_pool_map *m, unsigned int maxpools)
 
 fail_free:
         kfree(m->to_pool);
+        m->to_pool = NULL;
 fail:
         return -ENOMEM;
 }
@@ -285,9 +286,10 @@ svc_pool_map_put(void)
         mutex_lock(&svc_pool_map_mutex);
 
         if (!--m->count) {
-                m->mode = SVC_POOL_DEFAULT;
                 kfree(m->to_pool);
+                m->to_pool = NULL;
                 kfree(m->pool_to);
+                m->pool_to = NULL;
                 m->npools = 0;
         }
 
@@ -527,17 +529,20 @@ svc_destroy(struct svc_serv *serv)
                 printk("svc_destroy: no threads for serv=%p!\n", serv);
 
         del_timer_sync(&serv->sv_temptimer);
-
-        svc_close_all(&serv->sv_tempsocks);
+        /*
+         * The set of xprts (contained in the sv_tempsocks and
+         * sv_permsocks lists) is now constant, since it is modified
+         * only by accepting new sockets (done by service threads in
+         * svc_recv) or aging old ones (done by sv_temptimer), or
+         * configuration changes (excluded by whatever locking the
+         * caller is using--nfsd_mutex in the case of nfsd).  So it's
+         * safe to traverse those lists and shut everything down:
+         */
+        svc_close_all(serv);
 
         if (serv->sv_shutdown)
                 serv->sv_shutdown(serv);
 
-        svc_close_all(&serv->sv_permsocks);
-
-        BUG_ON(!list_empty(&serv->sv_permsocks));
-        BUG_ON(!list_empty(&serv->sv_tempsocks));
-
         cache_clean_deferred(serv);
 
         if (svc_serv_is_pooled(serv))
@@ -683,8 +688,8 @@ found_pool:
  * Create or destroy enough new threads to make the number
  * of threads the given number.  If `pool' is non-NULL, applies
  * only to threads in that pool, otherwise round-robins between
- * all pools.  Must be called with a svc_get() reference and
- * the BKL or another lock to protect access to svc_serv fields.
+ * all pools.  Caller must ensure that mutual exclusion between this and
+ * server startup or shutdown.
  *
  * Destroying threads relies on the service threads filling in
  * rqstp->rq_task, which only the nfs ones do.  Assumes the serv
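Two fixes from the shortlog are visible in these hunks. The deleted m->mode = SVC_POOL_DEFAULT; line is "svcrpc: don't revert to SVC_POOL_DEFAULT on nfsd shutdown": the administrator's configured pool mode now survives a stop/start cycle. The added assignments clear each pointer right after kfree(), so a later pass through the same cleanup frees NULL, which is defined to be a no-op, instead of double-freeing a stale pointer ("svcrpc: fix double-free on shutdown of nfsd after changing pool mode"). A freestanding illustration of that second pattern; pool_map here is a simplified stand-in, not the kernel's struct svc_pool_map:

#include <linux/slab.h>

struct pool_map {                      /* simplified stand-in */
        unsigned int *to_pool;
        unsigned int *pool_to;
};

static void pool_map_release(struct pool_map *m)
{
        kfree(m->to_pool);
        m->to_pool = NULL;     /* a second release now passes NULL: harmless no-op */
        kfree(m->pool_to);
        m->pool_to = NULL;     /* rather than double-freeing a stale pointer */
}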
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 38649cfa4e81..74cb0d8e9ca1 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -22,6 +22,7 @@ static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt);
 static int svc_deferred_recv(struct svc_rqst *rqstp);
 static struct cache_deferred_req *svc_defer(struct cache_req *req);
 static void svc_age_temp_xprts(unsigned long closure);
+static void svc_delete_xprt(struct svc_xprt *xprt);
 
 /* apparently the "standard" is that clients close
  * idle connections after 5 minutes, servers after
@@ -147,8 +148,8 @@ EXPORT_SYMBOL_GPL(svc_xprt_put);
  * Called by transport drivers to initialize the transport independent
  * portion of the transport instance.
  */
-void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
-                   struct svc_serv *serv)
+void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl,
+                   struct svc_xprt *xprt, struct svc_serv *serv)
 {
         memset(xprt, 0, sizeof(*xprt));
         xprt->xpt_class = xcl;
@@ -163,7 +164,7 @@ void svc_xprt_init(struct svc_xprt_class *xcl, struct svc_xprt *xprt,
         spin_lock_init(&xprt->xpt_lock);
         set_bit(XPT_BUSY, &xprt->xpt_flags);
         rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending");
-        xprt->xpt_net = get_net(&init_net);
+        xprt->xpt_net = get_net(net);
 }
 EXPORT_SYMBOL_GPL(svc_xprt_init);
 
@@ -878,7 +879,7 @@ static void call_xpt_users(struct svc_xprt *xprt)
 /*
  * Remove a dead transport
  */
-void svc_delete_xprt(struct svc_xprt *xprt)
+static void svc_delete_xprt(struct svc_xprt *xprt)
 {
         struct svc_serv *serv = xprt->xpt_server;
         struct svc_deferred_req *dr;
@@ -893,14 +894,7 @@ void svc_delete_xprt(struct svc_xprt *xprt)
         spin_lock_bh(&serv->sv_lock);
         if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags))
                 list_del_init(&xprt->xpt_list);
-        /*
-         * The only time we're called while xpt_ready is still on a list
-         * is while the list itself is about to be destroyed (in
-         * svc_destroy).  BUT svc_xprt_enqueue could still be attempting
-         * to add new entries to the sp_sockets list, so we can't leave
-         * a freed xprt on it.
-         */
-        list_del_init(&xprt->xpt_ready);
+        BUG_ON(!list_empty(&xprt->xpt_ready));
         if (test_bit(XPT_TEMP, &xprt->xpt_flags))
                 serv->sv_tmpcnt--;
         spin_unlock_bh(&serv->sv_lock);
@@ -928,22 +922,48 @@ void svc_close_xprt(struct svc_xprt *xprt)
 }
 EXPORT_SYMBOL_GPL(svc_close_xprt);
 
-void svc_close_all(struct list_head *xprt_list)
+static void svc_close_list(struct list_head *xprt_list)
+{
+        struct svc_xprt *xprt;
+
+        list_for_each_entry(xprt, xprt_list, xpt_list) {
+                set_bit(XPT_CLOSE, &xprt->xpt_flags);
+                set_bit(XPT_BUSY, &xprt->xpt_flags);
+        }
+}
+
+void svc_close_all(struct svc_serv *serv)
 {
+        struct svc_pool *pool;
         struct svc_xprt *xprt;
         struct svc_xprt *tmp;
+        int i;
+
+        svc_close_list(&serv->sv_tempsocks);
+        svc_close_list(&serv->sv_permsocks);
 
+        for (i = 0; i < serv->sv_nrpools; i++) {
+                pool = &serv->sv_pools[i];
+
+                spin_lock_bh(&pool->sp_lock);
+                while (!list_empty(&pool->sp_sockets)) {
+                        xprt = list_first_entry(&pool->sp_sockets, struct svc_xprt, xpt_ready);
+                        list_del_init(&xprt->xpt_ready);
+                }
+                spin_unlock_bh(&pool->sp_lock);
+        }
         /*
-         * The server is shutting down, and no more threads are running.
-         * svc_xprt_enqueue() might still be running, but at worst it
-         * will re-add the xprt to sp_sockets, which will soon get
-         * freed.  So we don't bother with any more locking, and don't
-         * leave the close to the (nonexistent) server threads:
+         * At this point the sp_sockets lists will stay empty, since
+         * svc_enqueue will not add new entries without taking the
+         * sp_lock and checking XPT_BUSY.
          */
-        list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) {
-                set_bit(XPT_CLOSE, &xprt->xpt_flags);
+        list_for_each_entry_safe(xprt, tmp, &serv->sv_tempsocks, xpt_list)
                 svc_delete_xprt(xprt);
-        }
+        list_for_each_entry_safe(xprt, tmp, &serv->sv_permsocks, xpt_list)
+                svc_delete_xprt(xprt);
+
+        BUG_ON(!list_empty(&serv->sv_permsocks));
+        BUG_ON(!list_empty(&serv->sv_tempsocks));
 }
 
 /*
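The rewritten shutdown path encodes an ordering argument: svc_close_list() first marks every xprt XPT_CLOSE and XPT_BUSY, the per-pool sp_sockets lists are then drained under sp_lock, and only afterwards are the xprts deleted, so the final list traversals need no locking. Below is a hedged, simplified paraphrase of the enqueue-side check this relies on, a sketch rather than the real svc_xprt_enqueue():

#include <linux/sunrpc/svc.h>
#include <linux/sunrpc/svc_xprt.h>

/* Sketch only: the real svc_xprt_enqueue() does much more bookkeeping. */
static void enqueue_sketch(struct svc_pool *pool, struct svc_xprt *xprt)
{
        spin_lock_bh(&pool->sp_lock);
        if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) {
                /*
                 * Another path owns this xprt -- including svc_close_list(),
                 * which sets XPT_BUSY precisely so this branch is taken and
                 * nothing new lands on sp_sockets during shutdown.
                 */
                spin_unlock_bh(&pool->sp_lock);
                return;
        }
        list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
        spin_unlock_bh(&pool->sp_lock);
}

This is also why the old defensive list_del_init(&xprt->xpt_ready) in svc_delete_xprt() could become a BUG_ON(): by the time deletion runs, an xprt can no longer be sitting on a ready list.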
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 4653286fcc9e..464570906f80 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -739,7 +739,8 @@ static void svc_udp_init(struct svc_sock *svsk, struct svc_serv *serv)
 {
         int err, level, optname, one = 1;
 
-        svc_xprt_init(&svc_udp_class, &svsk->sk_xprt, serv);
+        svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_udp_class,
+                      &svsk->sk_xprt, serv);
         clear_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
         svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
         svsk->sk_sk->sk_write_space = svc_write_space;
@@ -1343,7 +1344,8 @@ static void svc_tcp_init(struct svc_sock *svsk, struct svc_serv *serv)
 {
         struct sock *sk = svsk->sk_sk;
 
-        svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt, serv);
+        svc_xprt_init(sock_net(svsk->sk_sock->sk), &svc_tcp_class,
+                      &svsk->sk_xprt, serv);
         set_bit(XPT_CACHE_AUTH, &svsk->sk_xprt.xpt_flags);
         if (sk->sk_state == TCP_LISTEN) {
                 dprintk("setting up TCP socket for listening\n");
@@ -1659,7 +1661,7 @@ static struct svc_xprt *svc_bc_create_socket(struct svc_serv *serv,
                 return ERR_PTR(-ENOMEM);
 
         xprt = &svsk->sk_xprt;
-        svc_xprt_init(&svc_tcp_bc_class, xprt, serv);
+        svc_xprt_init(net, &svc_tcp_bc_class, xprt, serv);
 
         serv->sv_bc_xprt = xprt;
 
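All three call sites apply the same conversion: rather than pinning every transport to init_net, the owning namespace is derived from the socket itself (or, in the backchannel case, taken from the net argument already in scope). A hypothetical helper, not in the patch, showing the lookup:

#include <net/sock.h>
#include <net/net_namespace.h>

/* Map a kernel socket to the network namespace it was created in. */
static struct net *socket_owner_net(struct socket *sock)
{
        return sock_net(sock->sk);      /* read the sock's namespace pointer */
}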
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index ba1296d88de0..894cb42db91d 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -453,7 +453,7 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv,
 
         if (!cma_xprt)
                 return NULL;
-        svc_xprt_init(&svc_rdma_class, &cma_xprt->sc_xprt, serv);
+        svc_xprt_init(&init_net, &svc_rdma_class, &cma_xprt->sc_xprt, serv);
         INIT_LIST_HEAD(&cma_xprt->sc_accept_q);
         INIT_LIST_HEAD(&cma_xprt->sc_dto_q);
         INIT_LIST_HEAD(&cma_xprt->sc_rq_dto_q);
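Note that the RDMA transport alone keeps an explicit &init_net here; presumably the RDMA connection-management path had not yet been made namespace-aware at this point, so its listeners stay in the initial namespace while the socket transports follow their sockets.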