about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChuck Lever <chuck.lever@oracle.com>2016-03-01 13:06:38 -0500
committerJ. Bruce Fields <bfields@redhat.com>2016-03-01 16:06:38 -0500
commita6081b82c533d78041acb76738716aa7dafb339a (patch)
tree2d7d5f64b01bbf23494b1dc2de84ac9e21552e9d
parentc6db03ea577846a72dc80638f4a70b392c21962f (diff)
svcrdma: Make RDMA_ERROR messages work
Fix several issues with svc_rdma_send_error():

 - Post a receive buffer to replace the one that was consumed by
   the incoming request
 - Posting a send should use DMA_TO_DEVICE, not DMA_FROM_DEVICE
 - No need to put_page _and_ free pages in svc_rdma_put_context
 - Make sure the sge is set up completely in case the error path
   goes through svc_rdma_unmap_dma()
 - Replace the use of ENOSYS, which has a reserved meaning

Related fixes in svc_rdma_recvfrom():

 - Don't leak the ctxt associated with the incoming request
 - Don't close the connection after sending an error reply
 - Let svc_rdma_send_error() figure out the right header error code

As a last clean up, move svc_rdma_send_error() to svc_rdma_sendto.c
with other similar functions. There is some common logic in these
functions that could someday be combined to reduce code duplication.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Devesh Sharma <devesh.sharma@broadcom.com>
Tested-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: J. Bruce Fields <bfields@redhat.com>
-rw-r--r--include/linux/sunrpc/svc_rdma.h4
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_marshal.c2
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_recvfrom.c19
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_sendto.c62
-rw-r--r--net/sunrpc/xprtrdma/svc_rdma_transport.c54
5 files changed, 74 insertions(+), 67 deletions(-)
diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
index aef47dd2bd1a..42e852230a03 100644
--- a/include/linux/sunrpc/svc_rdma.h
+++ b/include/linux/sunrpc/svc_rdma.h
@@ -228,11 +228,11 @@ extern int svc_rdma_map_xdr(struct svcxprt_rdma *, struct xdr_buf *,
228extern int svc_rdma_sendto(struct svc_rqst *); 228extern int svc_rdma_sendto(struct svc_rqst *);
229extern struct rpcrdma_read_chunk * 229extern struct rpcrdma_read_chunk *
230 svc_rdma_get_read_chunk(struct rpcrdma_msg *); 230 svc_rdma_get_read_chunk(struct rpcrdma_msg *);
231extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
232 int);
231 233
232/* svc_rdma_transport.c */ 234/* svc_rdma_transport.c */
233extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *); 235extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
234extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
235 enum rpcrdma_errcode);
236extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t); 236extern int svc_rdma_post_recv(struct svcxprt_rdma *, gfp_t);
237extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t); 237extern int svc_rdma_repost_recv(struct svcxprt_rdma *, gfp_t);
238extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *); 238extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_marshal.c b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
index e2fca7617242..f74fc523d95b 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_marshal.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_marshal.c
@@ -162,7 +162,7 @@ int svc_rdma_xdr_decode_req(struct rpcrdma_msg **rdma_req,
162 } 162 }
163 163
164 if (rmsgp->rm_vers != rpcrdma_version) 164 if (rmsgp->rm_vers != rpcrdma_version)
165 return -ENOSYS; 165 return -EPROTONOSUPPORT;
166 166
167 /* Pull in the extra for the padded case and bump our pointer */ 167 /* Pull in the extra for the padded case and bump our pointer */
168 if (rmsgp->rm_type == rdma_msgp) { 168 if (rmsgp->rm_type == rdma_msgp) {
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index acf15b8bca70..0f09052110a7 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -612,7 +612,6 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
612 struct svc_rdma_op_ctxt *ctxt = NULL; 612 struct svc_rdma_op_ctxt *ctxt = NULL;
613 struct rpcrdma_msg *rmsgp; 613 struct rpcrdma_msg *rmsgp;
614 int ret = 0; 614 int ret = 0;
615 int len;
616 615
617 dprintk("svcrdma: rqstp=%p\n", rqstp); 616 dprintk("svcrdma: rqstp=%p\n", rqstp);
618 617
@@ -654,15 +653,10 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
654 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len); 653 rdma_build_arg_xdr(rqstp, ctxt, ctxt->byte_len);
655 654
656 /* Decode the RDMA header. */ 655 /* Decode the RDMA header. */
657 len = svc_rdma_xdr_decode_req(&rmsgp, rqstp); 656 ret = svc_rdma_xdr_decode_req(&rmsgp, rqstp);
658 rqstp->rq_xprt_hlen = len; 657 if (ret < 0)
659 658 goto out_err;
660 /* If the request is invalid, reply with an error */ 659 rqstp->rq_xprt_hlen = ret;
661 if (len < 0) {
662 if (len == -ENOSYS)
663 svc_rdma_send_error(rdma_xprt, rmsgp, ERR_VERS);
664 goto close_out;
665 }
666 660
667 if (svc_rdma_is_backchannel_reply(xprt, rmsgp)) { 661 if (svc_rdma_is_backchannel_reply(xprt, rmsgp)) {
668 ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, rmsgp, 662 ret = svc_rdma_handle_bc_reply(xprt->xpt_bc_xprt, rmsgp,
@@ -698,6 +692,11 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
698 svc_xprt_copy_addrs(rqstp, xprt); 692 svc_xprt_copy_addrs(rqstp, xprt);
699 return ret; 693 return ret;
700 694
695out_err:
696 svc_rdma_send_error(rdma_xprt, rmsgp, ret);
697 svc_rdma_put_context(ctxt, 0);
698 return 0;
699
701 close_out: 700 close_out:
702 if (ctxt) 701 if (ctxt)
703 svc_rdma_put_context(ctxt, 1); 702 svc_rdma_put_context(ctxt, 1);
diff --git a/net/sunrpc/xprtrdma/svc_rdma_sendto.c b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
index ace9efa7aa6c..a26ca569f257 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_sendto.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_sendto.c
@@ -652,3 +652,65 @@ int svc_rdma_sendto(struct svc_rqst *rqstp)
652 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags); 652 set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
653 return -ENOTCONN; 653 return -ENOTCONN;
654} 654}
655
656void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
657 int status)
658{
659 struct ib_send_wr err_wr;
660 struct page *p;
661 struct svc_rdma_op_ctxt *ctxt;
662 enum rpcrdma_errcode err;
663 __be32 *va;
664 int length;
665 int ret;
666
667 ret = svc_rdma_repost_recv(xprt, GFP_KERNEL);
668 if (ret)
669 return;
670
671 p = alloc_page(GFP_KERNEL);
672 if (!p)
673 return;
674 va = page_address(p);
675
676 /* XDR encode an error reply */
677 err = ERR_CHUNK;
678 if (status == -EPROTONOSUPPORT)
679 err = ERR_VERS;
680 length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
681
682 ctxt = svc_rdma_get_context(xprt);
683 ctxt->direction = DMA_TO_DEVICE;
684 ctxt->count = 1;
685 ctxt->pages[0] = p;
686
687 /* Prepare SGE for local address */
688 ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
689 ctxt->sge[0].length = length;
690 ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
691 p, 0, length, DMA_TO_DEVICE);
692 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
693 dprintk("svcrdma: Error mapping buffer for protocol error\n");
694 svc_rdma_put_context(ctxt, 1);
695 return;
696 }
697 atomic_inc(&xprt->sc_dma_used);
698
699 /* Prepare SEND WR */
700 memset(&err_wr, 0, sizeof(err_wr));
701 ctxt->wr_op = IB_WR_SEND;
702 err_wr.wr_id = (unsigned long)ctxt;
703 err_wr.sg_list = ctxt->sge;
704 err_wr.num_sge = 1;
705 err_wr.opcode = IB_WR_SEND;
706 err_wr.send_flags = IB_SEND_SIGNALED;
707
708 /* Post It */
709 ret = svc_rdma_send(xprt, &err_wr);
710 if (ret) {
711 dprintk("svcrdma: Error %d posting send for protocol error\n",
712 ret);
713 svc_rdma_unmap_dma(ctxt);
714 svc_rdma_put_context(ctxt, 1);
715 }
716}
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 03fdfce45493..15c8fa3ee794 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -1433,57 +1433,3 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
1433 } 1433 }
1434 return ret; 1434 return ret;
1435} 1435}
1436
1437void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
1438 enum rpcrdma_errcode err)
1439{
1440 struct ib_send_wr err_wr;
1441 struct page *p;
1442 struct svc_rdma_op_ctxt *ctxt;
1443 __be32 *va;
1444 int length;
1445 int ret;
1446
1447 p = alloc_page(GFP_KERNEL);
1448 if (!p)
1449 return;
1450 va = page_address(p);
1451
1452 /* XDR encode error */
1453 length = svc_rdma_xdr_encode_error(xprt, rmsgp, err, va);
1454
1455 ctxt = svc_rdma_get_context(xprt);
1456 ctxt->direction = DMA_FROM_DEVICE;
1457 ctxt->count = 1;
1458 ctxt->pages[0] = p;
1459
1460 /* Prepare SGE for local address */
1461 ctxt->sge[0].addr = ib_dma_map_page(xprt->sc_cm_id->device,
1462 p, 0, length, DMA_FROM_DEVICE);
1463 if (ib_dma_mapping_error(xprt->sc_cm_id->device, ctxt->sge[0].addr)) {
1464 put_page(p);
1465 svc_rdma_put_context(ctxt, 1);
1466 return;
1467 }
1468 atomic_inc(&xprt->sc_dma_used);
1469 ctxt->sge[0].lkey = xprt->sc_pd->local_dma_lkey;
1470 ctxt->sge[0].length = length;
1471
1472 /* Prepare SEND WR */
1473 memset(&err_wr, 0, sizeof err_wr);
1474 ctxt->wr_op = IB_WR_SEND;
1475 err_wr.wr_id = (unsigned long)ctxt;
1476 err_wr.sg_list = ctxt->sge;
1477 err_wr.num_sge = 1;
1478 err_wr.opcode = IB_WR_SEND;
1479 err_wr.send_flags = IB_SEND_SIGNALED;
1480
1481 /* Post It */
1482 ret = svc_rdma_send(xprt, &err_wr);
1483 if (ret) {
1484 dprintk("svcrdma: Error %d posting send for protocol error\n",
1485 ret);
1486 svc_rdma_unmap_dma(ctxt);
1487 svc_rdma_put_context(ctxt, 1);
1488 }
1489}