diff options
author    Linus Torvalds <torvalds@linux-foundation.org>    2018-02-08 18:18:32 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-02-08 18:18:32 -0500
commit    f1517df8701c9f12dae9ce7f43a5d300a6917619 (patch)
tree      b7ba72e70fcdf17306d2a488b5b245e578ad52aa /net
parent    9d21874da8ec0e0043c85cde8dda173e74ffc24d (diff)
parent    175e03101d36c3034f3c80038d4c28838351a7f2 (diff)
Merge tag 'nfsd-4.16' of git://linux-nfs.org/~bfields/linux
Pull nfsd update from Bruce Fields:
"A fairly small update this time around. Some cleanup, RDMA fixes,
overlayfs fixes, and a fix for an NFSv4 state bug.
The bigger deal for nfsd this time around was Jeff Layton's
already-merged i_version patches"
* tag 'nfsd-4.16' of git://linux-nfs.org/~bfields/linux:
svcrdma: Fix Read chunk round-up
NFSD: hide unused svcxdr_dupstr()
nfsd: store stat times in fill_pre_wcc() instead of inode times
nfsd: encode stat->mtime for getattr instead of inode->i_mtime
nfsd: return RESOURCE not GARBAGE_ARGS on too many ops
nfsd4: don't set lock stateid's sc_type to CLOSED
nfsd: Detect unhashed stids in nfsd4_verify_open_stid()
sunrpc: remove dead code in svc_sock_setbufsize
svcrdma: Post Receives in the Receive completion handler
nfsd4: permit layoutget of executable-only files
lockd: convert nlm_rqst.a_count from atomic_t to refcount_t
lockd: convert nlm_lockowner.count from atomic_t to refcount_t
lockd: convert nsm_handle.sm_count from atomic_t to refcount_t
Diffstat (limited to 'net')
-rw-r--r--    net/sunrpc/svcsock.c                          14
-rw-r--r--    net/sunrpc/xprtrdma/svc_rdma_backchannel.c     5
-rw-r--r--    net/sunrpc/xprtrdma/svc_rdma_recvfrom.c        9
-rw-r--r--    net/sunrpc/xprtrdma/svc_rdma_rw.c             12
-rw-r--r--    net/sunrpc/xprtrdma/svc_rdma_sendto.c          6
-rw-r--r--    net/sunrpc/xprtrdma/svc_rdma_transport.c      25
6 files changed, 16 insertions(+), 55 deletions(-)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index 5570719e4787..943f2a745cd5 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c | |||
@@ -384,25 +384,11 @@ static int svc_partial_recvfrom(struct svc_rqst *rqstp, | |||
384 | static void svc_sock_setbufsize(struct socket *sock, unsigned int snd, | 384 | static void svc_sock_setbufsize(struct socket *sock, unsigned int snd, |
385 | unsigned int rcv) | 385 | unsigned int rcv) |
386 | { | 386 | { |
387 | #if 0 | ||
388 | mm_segment_t oldfs; | ||
389 | oldfs = get_fs(); set_fs(KERNEL_DS); | ||
390 | sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF, | ||
391 | (char*)&snd, sizeof(snd)); | ||
392 | sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF, | ||
393 | (char*)&rcv, sizeof(rcv)); | ||
394 | #else | ||
395 | /* sock_setsockopt limits use to sysctl_?mem_max, | ||
396 | * which isn't acceptable. Until that is made conditional | ||
397 | * on not having CAP_SYS_RESOURCE or similar, we go direct... | ||
398 | * DaveM said I could! | ||
399 | */ | ||
400 | lock_sock(sock->sk); | 387 | lock_sock(sock->sk); |
401 | sock->sk->sk_sndbuf = snd * 2; | 388 | sock->sk->sk_sndbuf = snd * 2; |
402 | sock->sk->sk_rcvbuf = rcv * 2; | 389 | sock->sk->sk_rcvbuf = rcv * 2; |
403 | sock->sk->sk_write_space(sock->sk); | 390 | sock->sk->sk_write_space(sock->sk); |
404 | release_sock(sock->sk); | 391 | release_sock(sock->sk); |
405 | #endif | ||
406 | } | 392 | } |
407 | 393 | ||
408 | static int svc_sock_secure_port(struct svc_rqst *rqstp) | 394 | static int svc_sock_secure_port(struct svc_rqst *rqstp) |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c index af7893501e40..a73632ca9048 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c +++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c | |||
@@ -95,7 +95,6 @@ out_shortreply: | |||
95 | out_notfound: | 95 | out_notfound: |
96 | dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n", | 96 | dprintk("svcrdma: unrecognized bc reply: xprt=%p, xid=%08x\n", |
97 | xprt, be32_to_cpu(xid)); | 97 | xprt, be32_to_cpu(xid)); |
98 | |||
99 | goto out_unlock; | 98 | goto out_unlock; |
100 | } | 99 | } |
101 | 100 | ||
@@ -129,10 +128,6 @@ static int svc_rdma_bc_sendto(struct svcxprt_rdma *rdma, | |||
129 | if (ret < 0) | 128 | if (ret < 0) |
130 | goto out_err; | 129 | goto out_err; |
131 | 130 | ||
132 | ret = svc_rdma_repost_recv(rdma, GFP_NOIO); | ||
133 | if (ret) | ||
134 | goto out_err; | ||
135 | |||
136 | /* Bump page refcnt so Send completion doesn't release | 131 | /* Bump page refcnt so Send completion doesn't release |
137 | * the rq_buffer before all retransmits are complete. | 132 | * the rq_buffer before all retransmits are complete. |
138 | */ | 133 | */ |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c index ad4bd62eebf1..19e9c6b33042 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c +++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | |||
@@ -400,10 +400,6 @@ static void svc_rdma_send_error(struct svcxprt_rdma *xprt, | |||
400 | struct page *page; | 400 | struct page *page; |
401 | int ret; | 401 | int ret; |
402 | 402 | ||
403 | ret = svc_rdma_repost_recv(xprt, GFP_KERNEL); | ||
404 | if (ret) | ||
405 | return; | ||
406 | |||
407 | page = alloc_page(GFP_KERNEL); | 403 | page = alloc_page(GFP_KERNEL); |
408 | if (!page) | 404 | if (!page) |
409 | return; | 405 | return; |
@@ -554,8 +550,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp) | |||
554 | ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p, | 550 | ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, p, |
555 | &rqstp->rq_arg); | 551 | &rqstp->rq_arg); |
556 | svc_rdma_put_context(ctxt, 0); | 552 | svc_rdma_put_context(ctxt, 0); |
557 | if (ret) | ||
558 | goto repost; | ||
559 | return ret; | 553 | return ret; |
560 | } | 554 | } |
561 | 555 | ||
@@ -590,6 +584,5 @@ out_postfail: | |||
590 | 584 | ||
591 | out_drop: | 585 | out_drop: |
592 | svc_rdma_put_context(ctxt, 1); | 586 | svc_rdma_put_context(ctxt, 1); |
593 | repost: | 587 | return 0; |
594 | return svc_rdma_repost_recv(rdma_xprt, GFP_KERNEL); | ||
595 | } | 588 | } |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_rw.c b/net/sunrpc/xprtrdma/svc_rdma_rw.c index 9bd04549a1ad..12b9a7e0b6d2 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_rw.c +++ b/net/sunrpc/xprtrdma/svc_rdma_rw.c | |||
@@ -727,12 +727,16 @@ static int svc_rdma_build_normal_read_chunk(struct svc_rqst *rqstp, | |||
727 | head->arg.head[0].iov_len - info->ri_position; | 727 | head->arg.head[0].iov_len - info->ri_position; |
728 | head->arg.head[0].iov_len = info->ri_position; | 728 | head->arg.head[0].iov_len = info->ri_position; |
729 | 729 | ||
730 | /* Read chunk may need XDR roundup (see RFC 5666, s. 3.7). | 730 | /* Read chunk may need XDR roundup (see RFC 8166, s. 3.4.5.2). |
731 | * | 731 | * |
732 | * NFSv2/3 write decoders need the length of the tail to | 732 | * If the client already rounded up the chunk length, the |
733 | * contain the size of the roundup padding. | 733 | * length does not change. Otherwise, the length of the page |
734 | * list is increased to include XDR round-up. | ||
735 | * | ||
736 | * Currently these chunks always start at page offset 0, | ||
737 | * thus the rounded-up length never crosses a page boundary. | ||
734 | */ | 738 | */ |
735 | head->arg.tail[0].iov_len += 4 - (info->ri_chunklen & 3); | 739 | info->ri_chunklen = XDR_QUADLEN(info->ri_chunklen) << 2; |
736 | 740 | ||
737 | head->arg.page_len = info->ri_chunklen; | 741 | head->arg.page_len = info->ri_chunklen; |
738 | head->arg.len += info->ri_chunklen; | 742 | head->arg.len += info->ri_chunklen; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c index 7c3a211e0e9a..649441d5087d 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c +++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c | |||
@@ -674,9 +674,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) | |||
674 | svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret); | 674 | svc_rdma_xdr_encode_reply_chunk(rdma_resp, rp_ch, ret); |
675 | } | 675 | } |
676 | 676 | ||
677 | ret = svc_rdma_post_recv(rdma, GFP_KERNEL); | ||
678 | if (ret) | ||
679 | goto err1; | ||
680 | ret = svc_rdma_send_reply_msg(rdma, rdma_argp, rdma_resp, rqstp, | 677 | ret = svc_rdma_send_reply_msg(rdma, rdma_argp, rdma_resp, rqstp, |
681 | wr_lst, rp_ch); | 678 | wr_lst, rp_ch); |
682 | if (ret < 0) | 679 | if (ret < 0) |
@@ -687,9 +684,6 @@ int svc_rdma_sendto(struct svc_rqst *rqstp) | |||
687 | if (ret != -E2BIG && ret != -EINVAL) | 684 | if (ret != -E2BIG && ret != -EINVAL) |
688 | goto err1; | 685 | goto err1; |
689 | 686 | ||
690 | ret = svc_rdma_post_recv(rdma, GFP_KERNEL); | ||
691 | if (ret) | ||
692 | goto err1; | ||
693 | ret = svc_rdma_send_error_msg(rdma, rdma_resp, rqstp); | 687 | ret = svc_rdma_send_error_msg(rdma, rdma_resp, rqstp); |
694 | if (ret < 0) | 688 | if (ret < 0) |
695 | goto err0; | 689 | goto err0; |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 46ec069150d5..9ad12a215b51 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -58,6 +58,7 @@ | |||
58 | 58 | ||
59 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT | 59 | #define RPCDBG_FACILITY RPCDBG_SVCXPRT |
60 | 60 | ||
61 | static int svc_rdma_post_recv(struct svcxprt_rdma *xprt); | ||
61 | static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int); | 62 | static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *, int); |
62 | static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, | 63 | static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, |
63 | struct net *net, | 64 | struct net *net, |
@@ -320,6 +321,8 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc) | |||
320 | list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q); | 321 | list_add_tail(&ctxt->list, &xprt->sc_rq_dto_q); |
321 | spin_unlock(&xprt->sc_rq_dto_lock); | 322 | spin_unlock(&xprt->sc_rq_dto_lock); |
322 | 323 | ||
324 | svc_rdma_post_recv(xprt); | ||
325 | |||
323 | set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); | 326 | set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags); |
324 | if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags)) | 327 | if (test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags)) |
325 | goto out; | 328 | goto out; |
@@ -404,7 +407,8 @@ static struct svcxprt_rdma *rdma_create_xprt(struct svc_serv *serv, | |||
404 | return cma_xprt; | 407 | return cma_xprt; |
405 | } | 408 | } |
406 | 409 | ||
407 | int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags) | 410 | static int |
411 | svc_rdma_post_recv(struct svcxprt_rdma *xprt) | ||
408 | { | 412 | { |
409 | struct ib_recv_wr recv_wr, *bad_recv_wr; | 413 | struct ib_recv_wr recv_wr, *bad_recv_wr; |
410 | struct svc_rdma_op_ctxt *ctxt; | 414 | struct svc_rdma_op_ctxt *ctxt; |
@@ -423,7 +427,7 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags) | |||
423 | pr_err("svcrdma: Too many sges (%d)\n", sge_no); | 427 | pr_err("svcrdma: Too many sges (%d)\n", sge_no); |
424 | goto err_put_ctxt; | 428 | goto err_put_ctxt; |
425 | } | 429 | } |
426 | page = alloc_page(flags); | 430 | page = alloc_page(GFP_KERNEL); |
427 | if (!page) | 431 | if (!page) |
428 | goto err_put_ctxt; | 432 | goto err_put_ctxt; |
429 | ctxt->pages[sge_no] = page; | 433 | ctxt->pages[sge_no] = page; |
@@ -459,21 +463,6 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt, gfp_t flags) | |||
459 | return -ENOMEM; | 463 | return -ENOMEM; |
460 | } | 464 | } |
461 | 465 | ||
462 | int svc_rdma_repost_recv(struct svcxprt_rdma *xprt, gfp_t flags) | ||
463 | { | ||
464 | int ret = 0; | ||
465 | |||
466 | ret = svc_rdma_post_recv(xprt, flags); | ||
467 | if (ret) { | ||
468 | pr_err("svcrdma: could not post a receive buffer, err=%d.\n", | ||
469 | ret); | ||
470 | pr_err("svcrdma: closing transport %p.\n", xprt); | ||
471 | set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); | ||
472 | ret = -ENOTCONN; | ||
473 | } | ||
474 | return ret; | ||
475 | } | ||
476 | |||
477 | static void | 466 | static void |
478 | svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt, | 467 | svc_rdma_parse_connect_private(struct svcxprt_rdma *newxprt, |
479 | struct rdma_conn_param *param) | 468 | struct rdma_conn_param *param) |
@@ -833,7 +822,7 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt) | |||
833 | 822 | ||
834 | /* Post receive buffers */ | 823 | /* Post receive buffers */ |
835 | for (i = 0; i < newxprt->sc_max_requests; i++) { | 824 | for (i = 0; i < newxprt->sc_max_requests; i++) { |
836 | ret = svc_rdma_post_recv(newxprt, GFP_KERNEL); | 825 | ret = svc_rdma_post_recv(newxprt); |
837 | if (ret) { | 826 | if (ret) { |
838 | dprintk("svcrdma: failure posting receive buffers\n"); | 827 | dprintk("svcrdma: failure posting receive buffers\n"); |
839 | goto errout; | 828 | goto errout; |