author     Linus Torvalds <torvalds@linux-foundation.org>   2017-11-18 14:22:04 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-11-18 14:22:04 -0500
commit     4dd3c2e5a4225e3df85afc6033e62ce8b09f0ed2
tree       3d1dac5206550994b161eaab8ac73828f410228a   /net/sunrpc/svc_xprt.c
parent     07c455ee222f3ad219c2835d05a175a326a138fb
parent     22700f3c6df55387cec2ee27c533a7b23c76dc51
Merge tag 'nfsd-4.15' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
"Lots of good bugfixes, including:
- fix a number of races in the NFSv4+ state code
- fix some shutdown crashes in multiple-network-namespace cases
- relax our 4.1 session limits; if you've been hitting an artificially
  low limit on the number of 4.1 clients that can mount simultaneously,
  try upgrading"
* tag 'nfsd-4.15' of git://linux-nfs.org/~bfields/linux: (22 commits)
SUNRPC: Improve ordering of transport processing
nfsd: deal with revoked delegations appropriately
svcrdma: Enqueue after setting XPT_CLOSE in completion handlers
nfsd: use nfs->ns.inum as net ID
rpc: remove some BUG()s
svcrdma: Preserve CB send buffer across retransmits
nfds: avoid gettimeofday for nfssvc_boot time
fs, nfsd: convert nfs4_file.fi_ref from atomic_t to refcount_t
fs, nfsd: convert nfs4_cntl_odstate.co_odcount from atomic_t to refcount_t
fs, nfsd: convert nfs4_stid.sc_count from atomic_t to refcount_t
lockd: double unregister of inetaddr notifiers
nfsd4: catch some false session retries
nfsd4: fix cached replies to solo SEQUENCE compounds
sunrcp: make function _svc_create_xprt static
SUNRPC: Fix tracepoint storage issues with svc_recv and svc_rqst_status
nfsd: use ARRAY_SIZE
nfsd: give out fewer session slots as limit approaches
nfsd: increase DRC cache limit
nfsd: remove unnecessary nofilehandle checks
nfs_common: convert int to bool
...
Diffstat (limited to 'net/sunrpc/svc_xprt.c')
-rw-r--r--   net/sunrpc/svc_xprt.c | 106
1 file changed, 33 insertions(+), 73 deletions(-)
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 71de77bd4423..e8e0831229cf 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -250,9 +250,9 @@ void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
         svc_xprt_received(new);
 }
 
-int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
-                     struct net *net, const int family,
-                     const unsigned short port, int flags)
+static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
+                            struct net *net, const int family,
+                            const unsigned short port, int flags)
 {
         struct svc_xprt_class *xcl;
 
@@ -380,7 +380,6 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
         struct svc_pool *pool;
         struct svc_rqst *rqstp = NULL;
         int cpu;
-        bool queued = false;
 
         if (!svc_xprt_has_something_to_do(xprt))
                 goto out;
@@ -401,58 +400,25 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 
         atomic_long_inc(&pool->sp_stats.packets);
 
-redo_search:
+        dprintk("svc: transport %p put into queue\n", xprt);
+        spin_lock_bh(&pool->sp_lock);
+        list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
+        pool->sp_stats.sockets_queued++;
+        spin_unlock_bh(&pool->sp_lock);
+
         /* find a thread for this xprt */
         rcu_read_lock();
         list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
-                /* Do a lockless check first */
-                if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+                if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
                         continue;
-
-                /*
-                 * Once the xprt has been queued, it can only be dequeued by
-                 * the task that intends to service it. All we can do at that
-                 * point is to try to wake this thread back up so that it can
-                 * do so.
-                 */
-                if (!queued) {
-                        spin_lock_bh(&rqstp->rq_lock);
-                        if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
-                                /* already busy, move on... */
-                                spin_unlock_bh(&rqstp->rq_lock);
-                                continue;
-                        }
-
-                        /* this one will do */
-                        rqstp->rq_xprt = xprt;
-                        svc_xprt_get(xprt);
-                        spin_unlock_bh(&rqstp->rq_lock);
-                }
-                rcu_read_unlock();
-
                 atomic_long_inc(&pool->sp_stats.threads_woken);
                 wake_up_process(rqstp->rq_task);
-                put_cpu();
-                goto out;
-        }
-        rcu_read_unlock();
-
-        /*
-         * We didn't find an idle thread to use, so we need to queue the xprt.
-         * Do so and then search again. If we find one, we can't hook this one
-         * up to it directly but we can wake the thread up in the hopes that it
-         * will pick it up once it searches for a xprt to service.
-         */
-        if (!queued) {
-                queued = true;
-                dprintk("svc: transport %p put into queue\n", xprt);
-                spin_lock_bh(&pool->sp_lock);
-                list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
-                pool->sp_stats.sockets_queued++;
-                spin_unlock_bh(&pool->sp_lock);
-                goto redo_search;
+                goto out_unlock;
         }
+        set_bit(SP_CONGESTED, &pool->sp_flags);
         rqstp = NULL;
+out_unlock:
+        rcu_read_unlock();
         put_cpu();
 out:
         trace_svc_xprt_do_enqueue(xprt, rqstp);
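The rewrite above (from "SUNRPC: Improve ordering of transport processing") always queues the transport on pool->sp_sockets first and only then looks for an idle thread, claiming one with a single test_and_set_bit(RQ_BUSY); if every thread is busy it records that in the new SP_CONGESTED flag instead of re-running the search. The woken thread no longer receives the transport through rqstp->rq_xprt and rq_lock; it finds it itself by dequeuing. For context, the consumer side it pairs with, svc_xprt_dequeue() in the same file, is roughly the following (paraphrased, not part of this diff, details may differ):

static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool)
{
        struct svc_xprt *xprt = NULL;

        /* cheap lockless check before taking the pool lock */
        if (list_empty(&pool->sp_sockets))
                return NULL;

        spin_lock_bh(&pool->sp_lock);
        if (likely(!list_empty(&pool->sp_sockets))) {
                xprt = list_first_entry(&pool->sp_sockets,
                                        struct svc_xprt, xpt_ready);
                list_del_init(&xprt->xpt_ready);
                svc_xprt_get(xprt);     /* reference now held by this thread */
        }
        spin_unlock_bh(&pool->sp_lock);
        return xprt;
}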
@@ -721,38 +687,25 @@ rqst_should_sleep(struct svc_rqst *rqstp)
 
 static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 {
-        struct svc_xprt *xprt;
         struct svc_pool *pool = rqstp->rq_pool;
         long time_left = 0;
 
         /* rq_xprt should be clear on entry */
         WARN_ON_ONCE(rqstp->rq_xprt);
 
-        /* Normally we will wait up to 5 seconds for any required
-         * cache information to be provided.
-         */
-        rqstp->rq_chandle.thread_wait = 5*HZ;
-
-        xprt = svc_xprt_dequeue(pool);
-        if (xprt) {
-                rqstp->rq_xprt = xprt;
-
-                /* As there is a shortage of threads and this request
-                 * had to be queued, don't allow the thread to wait so
-                 * long for cache updates.
-                 */
-                rqstp->rq_chandle.thread_wait = 1*HZ;
-                clear_bit(SP_TASK_PENDING, &pool->sp_flags);
-                return xprt;
-        }
+        rqstp->rq_xprt = svc_xprt_dequeue(pool);
+        if (rqstp->rq_xprt)
+                goto out_found;
 
         /*
          * We have to be able to interrupt this wait
          * to bring down the daemons ...
          */
         set_current_state(TASK_INTERRUPTIBLE);
+        smp_mb__before_atomic();
+        clear_bit(SP_CONGESTED, &pool->sp_flags);
         clear_bit(RQ_BUSY, &rqstp->rq_flags);
-        smp_mb();
+        smp_mb__after_atomic();
 
         if (likely(rqst_should_sleep(rqstp)))
                 time_left = schedule_timeout(timeout);
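Note the ordering in the hunk above: the thread marks itself TASK_INTERRUPTIBLE first, then clears SP_CONGESTED and RQ_BUSY with smp_mb__before_atomic()/smp_mb__after_atomic() around the bit operations. The barriers pair with the atomic test_and_set_bit(RQ_BUSY) on the enqueue side, so the flags are visibly cleared before the final rqst_should_sleep() check and a wakeup issued in that window is not lost. For reference, rqst_should_sleep() sits just above this function and is untouched by the patch; it looks approximately like this (quoted from the contemporaneous source from memory, minor details may differ):

static bool rqst_should_sleep(struct svc_rqst *rqstp)
{
        struct svc_pool *pool = rqstp->rq_pool;

        /* did someone call svc_wake_up? */
        if (test_bit(SP_TASK_PENDING, &pool->sp_flags))
                return false;

        /* was a socket queued? */
        if (!list_empty(&pool->sp_sockets))
                return false;

        /* are we shutting down? */
        if (signalled() || kthread_should_stop())
                return false;

        /* are we freezing? */
        if (freezing(current))
                return false;

        return true;
}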
@@ -761,13 +714,11 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 
         try_to_freeze();
 
-        spin_lock_bh(&rqstp->rq_lock);
         set_bit(RQ_BUSY, &rqstp->rq_flags);
-        spin_unlock_bh(&rqstp->rq_lock);
-
-        xprt = rqstp->rq_xprt;
-        if (xprt != NULL)
-                return xprt;
+        smp_mb__after_atomic();
+        rqstp->rq_xprt = svc_xprt_dequeue(pool);
+        if (rqstp->rq_xprt)
+                goto out_found;
 
         if (!time_left)
                 atomic_long_inc(&pool->sp_stats.threads_timedout);
@@ -775,6 +726,15 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
         if (signalled() || kthread_should_stop())
                 return ERR_PTR(-EINTR);
         return ERR_PTR(-EAGAIN);
+out_found:
+        /* Normally we will wait up to 5 seconds for any required
+         * cache information to be provided.
+         */
+        if (!test_bit(SP_CONGESTED, &pool->sp_flags))
+                rqstp->rq_chandle.thread_wait = 5*HZ;
+        else
+                rqstp->rq_chandle.thread_wait = 1*HZ;
+        return rqstp->rq_xprt;
 }
 
 static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
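The new out_found exit replaces the old per-request heuristic: instead of shortening the cache-upcall wait only when this particular request had been queued (and clearing SP_TASK_PENDING at that point), the wait is now derived from whether the pool was marked congested when the transport was enqueued. Condensed, the effect of the added block is:

        /* Condensed restatement of the out_found block above. */
        rqstp->rq_chandle.thread_wait =
                test_bit(SP_CONGESTED, &pool->sp_flags) ? 1*HZ : 5*HZ;
        return rqstp->rq_xprt;

Callers are unaffected: svc_get_next_xprt() still returns either a transport or ERR_PTR(-EINTR)/ERR_PTR(-EAGAIN). The practical change is that when the pool was recently congested, every thread caps how long it will block on a cache upcall at one second instead of five, not just the threads whose request happened to be queued.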