author	Chuck Lever <chuck.lever@oracle.com>	2014-07-29 17:23:43 -0400
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2014-07-31 16:22:53 -0400
commit	6ab59945f292a5c6cbc4a6c2011f1a732a116af2 (patch)
tree	b6027a5d873b2c5941cfb279be193ad366b7b15d /net
parent	43e95988178ed70a878a5be6be9ad248342dbf7d (diff)
xprtrdma: Update rkeys after transport reconnect
Various reports of:

  rpcrdma_qp_async_error_upcall: QP error 3 on device mlx4_0 ep ffff8800bfd3e848

Ensure that rkeys in already-marshalled RPC/RDMA headers are refreshed
after the QP has been replaced by a reconnect.

BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=249
Suggested-by: Selvin Xavier <Selvin.Xavier@Emulex.Com>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
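In short: an RPC/RDMA header marshalled before a reconnect still carries the rkeys of the memory regions registered on the old QP, and once the QP is replaced those rkeys are stale, which is consistent with the QP errors reported above. The patch factors chunk marshalling out of rpcrdma_marshal_req() into a new helper, rpcrdma_marshal_chunks(), and re-runs it from the send path on retransmission. A condensed sketch of the post-patch send-path logic, taken from the transport.c hunk below (the surrounding declarations and the failed_marshal label of xprt_rdma_send_request() are elided):

	int rc = 0;

	if (req->rl_niovs == 0) {
		/* First transmission: build the whole RPC/RDMA header. */
		rc = rpcrdma_marshal_req(rqst);
	} else if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR) {
		/*
		 * Retransmission with FRMR: the header is already built,
		 * but the MRs behind its chunk lists may have been
		 * re-registered after a reconnect. Re-marshal the chunks
		 * so the header carries the current rkeys.
		 */
		rc = rpcrdma_marshal_chunks(rqst, 0);
	}
	if (rc < 0)
		goto failed_marshal;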
Diffstat (limited to 'net')
-rw-r--r--	net/sunrpc/xprtrdma/rpc_rdma.c	75
-rw-r--r--	net/sunrpc/xprtrdma/transport.c	11
-rw-r--r--	net/sunrpc/xprtrdma/xprt_rdma.h	10
3 files changed, 54 insertions, 42 deletions
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 693966d3f33b..54422f73b03b 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -53,14 +53,6 @@
 # define RPCDBG_FACILITY	RPCDBG_TRANS
 #endif
 
-enum rpcrdma_chunktype {
-	rpcrdma_noch = 0,
-	rpcrdma_readch,
-	rpcrdma_areadch,
-	rpcrdma_writech,
-	rpcrdma_replych
-};
-
 #ifdef RPC_DEBUG
 static const char transfertypes[][12] = {
 	"pure inline",	/* no chunks */
@@ -286,6 +278,28 @@ out:
 }
 
 /*
+ * Marshal chunks. This routine returns the header length
+ * consumed by marshaling.
+ *
+ * Returns positive RPC/RDMA header size, or negative errno.
+ */
+
+ssize_t
+rpcrdma_marshal_chunks(struct rpc_rqst *rqst, ssize_t result)
+{
+	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
+	struct rpcrdma_msg *headerp = (struct rpcrdma_msg *)req->rl_base;
+
+	if (req->rl_rtype != rpcrdma_noch)
+		result = rpcrdma_create_chunks(rqst, &rqst->rq_snd_buf,
+					       headerp, req->rl_rtype);
+	else if (req->rl_wtype != rpcrdma_noch)
+		result = rpcrdma_create_chunks(rqst, &rqst->rq_rcv_buf,
+					       headerp, req->rl_wtype);
+	return result;
+}
+
+/*
  * Copy write data inline.
  * This function is used for "small" requests. Data which is passed
  * to RPC via iovecs (or page list) is copied directly into the
@@ -377,7 +391,6 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	char *base;
 	size_t rpclen, padlen;
 	ssize_t hdrlen;
-	enum rpcrdma_chunktype rtype, wtype;
 	struct rpcrdma_msg *headerp;
 
 	/*
@@ -415,13 +428,13 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * into pages; otherwise use reply chunks.
 	 */
 	if (rqst->rq_rcv_buf.buflen <= RPCRDMA_INLINE_READ_THRESHOLD(rqst))
-		wtype = rpcrdma_noch;
+		req->rl_wtype = rpcrdma_noch;
 	else if (rqst->rq_rcv_buf.page_len == 0)
-		wtype = rpcrdma_replych;
+		req->rl_wtype = rpcrdma_replych;
 	else if (rqst->rq_rcv_buf.flags & XDRBUF_READ)
-		wtype = rpcrdma_writech;
+		req->rl_wtype = rpcrdma_writech;
 	else
-		wtype = rpcrdma_replych;
+		req->rl_wtype = rpcrdma_replych;
 
 	/*
 	 * Chunks needed for arguments?
@@ -438,16 +451,16 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * TBD check NFSv4 setacl
 	 */
 	if (rqst->rq_snd_buf.len <= RPCRDMA_INLINE_WRITE_THRESHOLD(rqst))
-		rtype = rpcrdma_noch;
+		req->rl_rtype = rpcrdma_noch;
 	else if (rqst->rq_snd_buf.page_len == 0)
-		rtype = rpcrdma_areadch;
+		req->rl_rtype = rpcrdma_areadch;
 	else
-		rtype = rpcrdma_readch;
+		req->rl_rtype = rpcrdma_readch;
 
 	/* The following simplification is not true forever */
-	if (rtype != rpcrdma_noch && wtype == rpcrdma_replych)
-		wtype = rpcrdma_noch;
-	if (rtype != rpcrdma_noch && wtype != rpcrdma_noch) {
+	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype == rpcrdma_replych)
+		req->rl_wtype = rpcrdma_noch;
+	if (req->rl_rtype != rpcrdma_noch && req->rl_wtype != rpcrdma_noch) {
 		dprintk("RPC: %s: cannot marshal multiple chunk lists\n",
 			__func__);
 		return -EIO;
@@ -461,7 +474,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 	 * When padding is in use and applies to the transfer, insert
 	 * it and change the message type.
 	 */
-	if (rtype == rpcrdma_noch) {
+	if (req->rl_rtype == rpcrdma_noch) {
 
 		padlen = rpcrdma_inline_pullup(rqst,
 						RPCRDMA_INLINE_PAD_VALUE(rqst));
@@ -476,7 +489,7 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 		headerp->rm_body.rm_padded.rm_pempty[1] = xdr_zero;
 		headerp->rm_body.rm_padded.rm_pempty[2] = xdr_zero;
 		hdrlen += 2 * sizeof(u32);	/* extra words in padhdr */
-		if (wtype != rpcrdma_noch) {
+		if (req->rl_wtype != rpcrdma_noch) {
 			dprintk("RPC: %s: invalid chunk list\n",
 				__func__);
 			return -EIO;
@@ -497,30 +510,18 @@ rpcrdma_marshal_req(struct rpc_rqst *rqst)
 			 * on receive. Therefore, we request a reply chunk
 			 * for non-writes wherever feasible and efficient.
 			 */
-			if (wtype == rpcrdma_noch)
-				wtype = rpcrdma_replych;
+			if (req->rl_wtype == rpcrdma_noch)
+				req->rl_wtype = rpcrdma_replych;
 		}
 	}
 
-	/*
-	 * Marshal chunks. This routine will return the header length
-	 * consumed by marshaling.
-	 */
-	if (rtype != rpcrdma_noch) {
-		hdrlen = rpcrdma_create_chunks(rqst,
-					&rqst->rq_snd_buf, headerp, rtype);
-		wtype = rtype;	/* simplify dprintk */
-
-	} else if (wtype != rpcrdma_noch) {
-		hdrlen = rpcrdma_create_chunks(rqst,
-					&rqst->rq_rcv_buf, headerp, wtype);
-	}
+	hdrlen = rpcrdma_marshal_chunks(rqst, hdrlen);
 	if (hdrlen < 0)
 		return hdrlen;
 
 	dprintk("RPC: %s: %s: hdrlen %zd rpclen %zd padlen %zd"
 		" headerp 0x%p base 0x%p lkey 0x%x\n",
-		__func__, transfertypes[wtype], hdrlen, rpclen, padlen,
+		__func__, transfertypes[req->rl_wtype], hdrlen, rpclen, padlen,
 		headerp, base, req->rl_iov.lkey);
 
 	/*
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 418510202919..f6d280b31dc9 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -597,13 +597,14 @@ xprt_rdma_send_request(struct rpc_task *task)
 	struct rpc_xprt *xprt = rqst->rq_xprt;
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
-	int rc;
+	int rc = 0;
 
-	if (req->rl_niovs == 0) {
+	if (req->rl_niovs == 0)
 		rc = rpcrdma_marshal_req(rqst);
-		if (rc < 0)
-			goto failed_marshal;
-	}
+	else if (r_xprt->rx_ia.ri_memreg_strategy == RPCRDMA_FRMR)
+		rc = rpcrdma_marshal_chunks(rqst, 0);
+	if (rc < 0)
+		goto failed_marshal;
 
 	if (req->rl_reply == NULL)		/* e.g. reconnection */
 		rpcrdma_recv_buffer_get(req);
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index f3d86b24a4af..c270e59cf917 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -99,6 +99,14 @@ struct rpcrdma_ep {
 #define INIT_CQCOUNT(ep) atomic_set(&(ep)->rep_cqcount, (ep)->rep_cqinit)
 #define DECR_CQCOUNT(ep) atomic_sub_return(1, &(ep)->rep_cqcount)
 
+enum rpcrdma_chunktype {
+	rpcrdma_noch = 0,
+	rpcrdma_readch,
+	rpcrdma_areadch,
+	rpcrdma_writech,
+	rpcrdma_replych
+};
+
 /*
  * struct rpcrdma_rep -- this structure encapsulates state required to recv
  * and complete a reply, asychronously. It needs several pieces of
@@ -192,6 +200,7 @@ struct rpcrdma_req {
 	unsigned int	rl_niovs;	/* 0, 2 or 4 */
 	unsigned int	rl_nchunks;	/* non-zero if chunks */
 	unsigned int	rl_connect_cookie;	/* retry detection */
+	enum rpcrdma_chunktype	rl_rtype, rl_wtype;
 	struct rpcrdma_buffer *rl_buffer; /* home base for this structure */
 	struct rpcrdma_rep	*rl_reply;/* holder for reply buffer */
 	struct rpcrdma_mr_seg rl_segments[RPCRDMA_MAX_SEGS];/* chunk segments */
@@ -347,6 +356,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
 /*
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
+ssize_t rpcrdma_marshal_chunks(struct rpc_rqst *, ssize_t);
 int rpcrdma_marshal_req(struct rpc_rqst *);
 size_t rpcrdma_max_payload(struct rpcrdma_xprt *);
 