author     Linus Torvalds <torvalds@linux-foundation.org>   2017-11-18 14:22:04 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-11-18 14:22:04 -0500
commit     4dd3c2e5a4225e3df85afc6033e62ce8b09f0ed2 (patch)
tree       3d1dac5206550994b161eaab8ac73828f410228a /net/sunrpc
parent     07c455ee222f3ad219c2835d05a175a326a138fb (diff)
parent     22700f3c6df55387cec2ee27c533a7b23c76dc51 (diff)
Merge tag 'nfsd-4.15' of git://linux-nfs.org/~bfields/linux
Pull nfsd updates from Bruce Fields:
 "Lots of good bugfixes, including:

   - fix a number of races in the NFSv4+ state code

   - fix some shutdown crashes in multiple-network-namespace cases

   - relax our 4.1 session limits; if you've an artificially low limit
     to the number of 4.1 clients that can mount simultaneously, try
     upgrading"

* tag 'nfsd-4.15' of git://linux-nfs.org/~bfields/linux: (22 commits)
  SUNRPC: Improve ordering of transport processing
  nfsd: deal with revoked delegations appropriately
  svcrdma: Enqueue after setting XPT_CLOSE in completion handlers
  nfsd: use nfs->ns.inum as net ID
  rpc: remove some BUG()s
  svcrdma: Preserve CB send buffer across retransmits
  nfds: avoid gettimeofday for nfssvc_boot time
  fs, nfsd: convert nfs4_file.fi_ref from atomic_t to refcount_t
  fs, nfsd: convert nfs4_cntl_odstate.co_odcount from atomic_t to refcount_t
  fs, nfsd: convert nfs4_stid.sc_count from atomic_t to refcount_t
  lockd: double unregister of inetaddr notifiers
  nfsd4: catch some false session retries
  nfsd4: fix cached replies to solo SEQUENCE compounds
  sunrcp: make function _svc_create_xprt static
  SUNRPC: Fix tracepoint storage issues with svc_recv and svc_rqst_status
  nfsd: use ARRAY_SIZE
  nfsd: give out fewer session slots as limit approaches
  nfsd: increase DRC cache limit
  nfsd: remove unnecessary nofilehandle checks
  nfs_common: convert int to bool
  ...
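Three of the commits above convert nfsd reference counters from atomic_t to refcount_t. A rough sketch of what that conversion pattern looks like (the struct and helpers below are hypothetical stand-ins, not the actual nfs4_stid code):

    #include <linux/refcount.h>

    struct stid_sketch {                    /* hypothetical stand-in for nfs4_stid */
            refcount_t sc_count;            /* was: atomic_t sc_count; */
    };

    static void stid_sketch_init(struct stid_sketch *s)
    {
            refcount_set(&s->sc_count, 1);  /* was: atomic_set(&s->sc_count, 1); */
    }

    static void stid_sketch_get(struct stid_sketch *s)
    {
            /* refcount_inc() saturates and WARNs on overflow, where
             * atomic_inc() would silently wrap and invite use-after-free.
             */
            refcount_inc(&s->sc_count);
    }

    static bool stid_sketch_put(struct stid_sketch *s)
    {
            /* was: atomic_dec_and_test(); the caller frees on true */
            return refcount_dec_and_test(&s->sc_count);
    }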
Diffstat (limited to 'net/sunrpc')
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c           |  14
-rw-r--r--  net/sunrpc/svc_xprt.c                       | 106
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_backchannel.c  |   6
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_transport.c    |  11
4 files changed, 55 insertions, 82 deletions
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 7b1ee5a0b03c..73165e9ca5bf 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -855,11 +855,13 @@ unwrap_integ_data(struct svc_rqst *rqstp, struct xdr_buf *buf, u32 seq, struct g
 		return stat;
 	if (integ_len > buf->len)
 		return stat;
-	if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len))
-		BUG();
+	if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) {
+		WARN_ON_ONCE(1);
+		return stat;
+	}
 	/* copy out mic... */
 	if (read_u32_from_xdr_buf(buf, integ_len, &mic.len))
-		BUG();
+		return stat;
 	if (mic.len > RPC_MAX_AUTH_SIZE)
 		return stat;
 	mic.data = kmalloc(mic.len, GFP_KERNEL);
@@ -1611,8 +1613,10 @@ svcauth_gss_wrap_resp_integ(struct svc_rqst *rqstp)
 	BUG_ON(integ_len % 4);
 	*p++ = htonl(integ_len);
 	*p++ = htonl(gc->gc_seq);
-	if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len))
-		BUG();
+	if (xdr_buf_subsegment(resbuf, &integ_buf, integ_offset, integ_len)) {
+		WARN_ON_ONCE(1);
+		goto out_err;
+	}
 	if (resbuf->tail[0].iov_base == NULL) {
 		if (resbuf->head[0].iov_len + RPC_MAX_AUTH_SIZE > PAGE_SIZE)
 			goto out_err;
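Both hunks above are instances of the pattern from "rpc: remove some BUG()s": a sanity-check failure on request data now logs once and fails that one request instead of panicking the host. Condensed into a sketch (the function below is illustrative, not the verbatim kernel code; "stat" stands for the RPC-level failure status the real callers prepare earlier):

    #include <linux/bug.h>
    #include <linux/sunrpc/xdr.h>

    static u32 unwrap_sketch(struct xdr_buf *buf, u32 integ_len, u32 stat)
    {
            struct xdr_buf integ_buf;

            if (integ_len > buf->len)
                    return stat;            /* bad length: fail the request */
            if (xdr_buf_subsegment(buf, &integ_buf, 0, integ_len)) {
                    WARN_ON_ONCE(1);        /* log once instead of BUG()... */
                    return stat;            /* ...and keep the host running */
            }
            return 0;                       /* success; the real code continues */
    }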
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 71de77bd4423..e8e0831229cf 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -250,9 +250,9 @@ void svc_add_new_perm_xprt(struct svc_serv *serv, struct svc_xprt *new)
 	svc_xprt_received(new);
 }
 
-int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
-		     struct net *net, const int family,
-		     const unsigned short port, int flags)
+static int _svc_create_xprt(struct svc_serv *serv, const char *xprt_name,
+			    struct net *net, const int family,
+			    const unsigned short port, int flags)
 {
 	struct svc_xprt_class *xcl;
 
@@ -380,7 +380,6 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 	struct svc_pool *pool;
 	struct svc_rqst *rqstp = NULL;
 	int cpu;
-	bool queued = false;
 
 	if (!svc_xprt_has_something_to_do(xprt))
 		goto out;
@@ -401,58 +400,25 @@ void svc_xprt_do_enqueue(struct svc_xprt *xprt)
 
 	atomic_long_inc(&pool->sp_stats.packets);
 
-redo_search:
+	dprintk("svc: transport %p put into queue\n", xprt);
+	spin_lock_bh(&pool->sp_lock);
+	list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
+	pool->sp_stats.sockets_queued++;
+	spin_unlock_bh(&pool->sp_lock);
+
 	/* find a thread for this xprt */
 	rcu_read_lock();
 	list_for_each_entry_rcu(rqstp, &pool->sp_all_threads, rq_all) {
-		/* Do a lockless check first */
-		if (test_bit(RQ_BUSY, &rqstp->rq_flags))
+		if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags))
 			continue;
-
-		/*
-		 * Once the xprt has been queued, it can only be dequeued by
-		 * the task that intends to service it. All we can do at that
-		 * point is to try to wake this thread back up so that it can
-		 * do so.
-		 */
-		if (!queued) {
-			spin_lock_bh(&rqstp->rq_lock);
-			if (test_and_set_bit(RQ_BUSY, &rqstp->rq_flags)) {
-				/* already busy, move on... */
-				spin_unlock_bh(&rqstp->rq_lock);
-				continue;
-			}
-
-			/* this one will do */
-			rqstp->rq_xprt = xprt;
-			svc_xprt_get(xprt);
-			spin_unlock_bh(&rqstp->rq_lock);
-		}
-		rcu_read_unlock();
-
 		atomic_long_inc(&pool->sp_stats.threads_woken);
 		wake_up_process(rqstp->rq_task);
-		put_cpu();
-		goto out;
-	}
-	rcu_read_unlock();
-
-	/*
-	 * We didn't find an idle thread to use, so we need to queue the xprt.
-	 * Do so and then search again. If we find one, we can't hook this one
-	 * up to it directly but we can wake the thread up in the hopes that it
-	 * will pick it up once it searches for a xprt to service.
-	 */
-	if (!queued) {
-		queued = true;
-		dprintk("svc: transport %p put into queue\n", xprt);
-		spin_lock_bh(&pool->sp_lock);
-		list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
-		pool->sp_stats.sockets_queued++;
-		spin_unlock_bh(&pool->sp_lock);
-		goto redo_search;
+		goto out_unlock;
 	}
+	set_bit(SP_CONGESTED, &pool->sp_flags);
 	rqstp = NULL;
+out_unlock:
+	rcu_read_unlock();
 	put_cpu();
 out:
 	trace_svc_xprt_do_enqueue(xprt, rqstp);
@@ -721,38 +687,25 @@ rqst_should_sleep(struct svc_rqst *rqstp)
 
 static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 {
-	struct svc_xprt *xprt;
 	struct svc_pool *pool = rqstp->rq_pool;
 	long time_left = 0;
 
 	/* rq_xprt should be clear on entry */
 	WARN_ON_ONCE(rqstp->rq_xprt);
 
-	/* Normally we will wait up to 5 seconds for any required
-	 * cache information to be provided.
-	 */
-	rqstp->rq_chandle.thread_wait = 5*HZ;
-
-	xprt = svc_xprt_dequeue(pool);
-	if (xprt) {
-		rqstp->rq_xprt = xprt;
-
-		/* As there is a shortage of threads and this request
-		 * had to be queued, don't allow the thread to wait so
-		 * long for cache updates.
-		 */
-		rqstp->rq_chandle.thread_wait = 1*HZ;
-		clear_bit(SP_TASK_PENDING, &pool->sp_flags);
-		return xprt;
-	}
+	rqstp->rq_xprt = svc_xprt_dequeue(pool);
+	if (rqstp->rq_xprt)
+		goto out_found;
 
 	/*
 	 * We have to be able to interrupt this wait
 	 * to bring down the daemons ...
 	 */
 	set_current_state(TASK_INTERRUPTIBLE);
+	smp_mb__before_atomic();
+	clear_bit(SP_CONGESTED, &pool->sp_flags);
 	clear_bit(RQ_BUSY, &rqstp->rq_flags);
-	smp_mb();
+	smp_mb__after_atomic();
 
 	if (likely(rqst_should_sleep(rqstp)))
 		time_left = schedule_timeout(timeout);
@@ -761,13 +714,11 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 
 	try_to_freeze();
 
-	spin_lock_bh(&rqstp->rq_lock);
 	set_bit(RQ_BUSY, &rqstp->rq_flags);
-	spin_unlock_bh(&rqstp->rq_lock);
-
-	xprt = rqstp->rq_xprt;
-	if (xprt != NULL)
-		return xprt;
+	smp_mb__after_atomic();
+	rqstp->rq_xprt = svc_xprt_dequeue(pool);
+	if (rqstp->rq_xprt)
+		goto out_found;
 
 	if (!time_left)
 		atomic_long_inc(&pool->sp_stats.threads_timedout);
@@ -775,6 +726,15 @@ static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
 	if (signalled() || kthread_should_stop())
 		return ERR_PTR(-EINTR);
 	return ERR_PTR(-EAGAIN);
+out_found:
+	/* Normally we will wait up to 5 seconds for any required
+	 * cache information to be provided.
+	 */
+	if (!test_bit(SP_CONGESTED, &pool->sp_flags))
+		rqstp->rq_chandle.thread_wait = 5*HZ;
+	else
+		rqstp->rq_chandle.thread_wait = 1*HZ;
+	return rqstp->rq_xprt;
 }
 
 static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
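Taken together, the svc_xprt.c hunks implement "SUNRPC: Improve ordering of transport processing": the transport is now always queued before any wakeup is attempted, a woken thread always dequeues work for itself, and the new SP_CONGESTED pool flag replaces the old per-dequeue heuristic for shortening cache waits. The two sides condensed into a sketch (simplified, not the verbatim kernel code; wake_one_idle_thread() is a hypothetical helper standing in for the RCU walk over sp_all_threads):

    /* Enqueue side: publish the work first, then kick one idle thread.
     * If every thread is busy, record the backlog for later.
     */
    spin_lock_bh(&pool->sp_lock);
    list_add_tail(&xprt->xpt_ready, &pool->sp_sockets);
    spin_unlock_bh(&pool->sp_lock);
    if (!wake_one_idle_thread(pool))
            set_bit(SP_CONGESTED, &pool->sp_flags);

    /* Dequeue side: a thread clears RQ_BUSY (the smp_mb__* barriers order
     * that store against the enqueuer's test_and_set_bit()) and re-checks
     * the queue on both sides of its sleep, so a wakeup posted in the gap
     * is never lost. SP_CONGESTED then picks the cache-wait deadline.
     */
    rqstp->rq_chandle.thread_wait =
            test_bit(SP_CONGESTED, &pool->sp_flags) ? 1*HZ : 5*HZ;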
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
index 992594b7cc6b..af7893501e40 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
@@ -133,6 +133,10 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma,
 	if (ret)
 		goto out_err;
 
+	/* Bump page refcnt so Send completion doesn't release
+	 * the rq_buffer before all retransmits are complete.
+	 */
+	get_page(virt_to_page(rqst->rq_buffer));
 	ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0);
 	if (ret)
 		goto out_unmap;
@@ -165,7 +169,6 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
 		return -EINVAL;
 	}
 
-	/* svc_rdma_sendto releases this page */
 	page = alloc_page(RPCRDMA_DEF_GFP);
 	if (!page)
 		return -ENOMEM;
@@ -184,6 +187,7 @@ xprt_rdma_bc_free(struct rpc_task *task)
 {
 	struct rpc_rqst *rqst = task->tk_rqstp;
 
+	put_page(virt_to_page(rqst->rq_buffer));
 	kfree(rqst->rq_rbuffer);
 }
 
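The backchannel fix pairs an extra page reference taken before the Send is posted with a matching release in the free path, so a fast Send completion can no longer free rq_buffer while the RPC layer might still retransmit it. The pairing in miniature (condensed from the hunks above):

    /* send path: hold the page across the Send and any retransmits */
    get_page(virt_to_page(rqst->rq_buffer));
    ret = svc_rdma_post_send_wr(rdma, ctxt, 1, 0);

    /* free path (xprt_rdma_bc_free): drop that reference; the page is
     * actually released only once the Send completion has dropped its own.
     */
    put_page(virt_to_page(rqst->rq_buffer));
    kfree(rqst->rq_rbuffer);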
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 5caf8e722a11..46ec069150d5 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -290,6 +290,7 @@ static void qp_event_handler(struct ib_event *event, void *context)
 			ib_event_msg(event->event), event->event,
 			event->element.qp);
 		set_bit(XPT_CLOSE, &xprt->xpt_flags);
+		svc_xprt_enqueue(xprt);
 		break;
 	}
 }
@@ -322,8 +323,7 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
 	if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
 		goto out;
-	svc_xprt_enqueue(&xprt->sc_xprt);
-	goto out;
+	goto out_enqueue;
 
 flushed:
 	if (wc->status != IB_WC_WR_FLUSH_ERR)
@@ -333,6 +333,8 @@ flushed:
 	set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
 	svc_rdma_put_context(ctxt, 1);
 
+out_enqueue:
+	svc_xprt_enqueue(&xprt->sc_xprt);
 out:
 	svc_xprt_put(&xprt->sc_xprt);
 }
@@ -358,6 +360,7 @@ void svc_rdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
 
 	if (unlikely(wc->status != IB_WC_SUCCESS)) {
 		set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+		svc_xprt_enqueue(&xprt->sc_xprt);
 		if (wc->status != IB_WC_WR_FLUSH_ERR)
 			pr_err("svcrdma: Send: %s (%u/0x%x)\n",
 			       ib_wc_status_msg(wc->status),
@@ -569,8 +572,10 @@ static int rdma_listen_handler(struct rdma_cm_id *cma_id,
 	case RDMA_CM_EVENT_DEVICE_REMOVAL:
 		dprintk("svcrdma: Device removal xprt=%p, cm_id=%p\n",
 			xprt, cma_id);
-		if (xprt)
+		if (xprt) {
 			set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
+			svc_xprt_enqueue(&xprt->sc_xprt);
+		}
 		break;
 
 	default:
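All five svc_rdma_transport.c hunks apply the same rule from "svcrdma: Enqueue after setting XPT_CLOSE in completion handlers": XPT_CLOSE only marks the transport dead, and nothing tears it down until a server thread processes it. The pattern condensed (not verbatim kernel code):

    set_bit(XPT_CLOSE, &xprt->xpt_flags);   /* mark the transport dead... */
    svc_xprt_enqueue(xprt);                 /* ...and schedule a thread to
                                             * actually close it; without the
                                             * enqueue, the dead transport
                                             * lingers until some unrelated
                                             * event happens to queue it. */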