author		Chuck Lever <chuck.lever@oracle.com>		2019-06-19 10:33:20 -0400
committer	Anna Schumaker <Anna.Schumaker@Netapp.com>	2019-07-09 10:30:25 -0400
commit		379d1bc5be373c920bcda16b9894ae99505ea127
tree		eff3b258a776e7beaa1be8b05c7b673019444470 /net/sunrpc
parent		0ab115237025f5e379620bbcd56a02697d07b002
xprtrdma: Simplify rpcrdma_rep_create
Clean up.
Commit 7c8d9e7c8863 ("xprtrdma: Move Receive posting to Receive
handler") reduced the number of rpcrdma_rep_create call sites to
one. After that commit, the backchannel code no longer invokes it.
Therefore the free list logic added by commit d698c4a02ee0
("xprtrdma: Fix backchannel allocation of extra rpcrdma_reps") is
no longer necessary, and in fact adds some extra overhead that we
can do without.
Simply post any newly created reps. They will get added back to
the rb_recv_bufs list when they subsequently complete.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Diffstat (limited to 'net/sunrpc')
 net/sunrpc/xprtrdma/verbs.c | 22 ++++++++--------------
 1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 4e22cc244149..de6be101abf2 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1036,9 +1036,9 @@ out1:
 	return NULL;
 }
 
-static bool rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, bool temp)
+static struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
+					      bool temp)
 {
-	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
 	struct rpcrdma_rep *rep;
 
 	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
@@ -1049,9 +1049,9 @@ static bool rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, bool temp)
 			     DMA_FROM_DEVICE, GFP_KERNEL);
 	if (!rep->rr_rdmabuf)
 		goto out_free;
+
 	xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
 		     rdmab_length(rep->rr_rdmabuf));
-
 	rep->rr_cqe.done = rpcrdma_wc_receive;
 	rep->rr_rxprt = r_xprt;
 	rep->rr_recv_wr.next = NULL;
@@ -1059,16 +1059,12 @@ static bool rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt, bool temp)
 	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
 	rep->rr_recv_wr.num_sge = 1;
 	rep->rr_temp = temp;
-
-	spin_lock(&buf->rb_lock);
-	list_add(&rep->rr_list, &buf->rb_recv_bufs);
-	spin_unlock(&buf->rb_lock);
-	return true;
+	return rep;
 
 out_free:
 	kfree(rep);
 out:
-	return false;
+	return NULL;
 }
 
 /**
@@ -1497,7 +1493,6 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
 	count = 0;
 	wr = NULL;
 	while (needed) {
-		struct rpcrdma_regbuf *rb;
 		struct rpcrdma_rep *rep;
 
 		spin_lock(&buf->rb_lock);
@@ -1507,13 +1502,12 @@ rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
 			list_del(&rep->rr_list);
 		spin_unlock(&buf->rb_lock);
 		if (!rep) {
-			if (!rpcrdma_rep_create(r_xprt, temp))
+			rep = rpcrdma_rep_create(r_xprt, temp);
+			if (!rep)
 				break;
-			continue;
 		}
 
-		rb = rep->rr_rdmabuf;
-		if (!rpcrdma_regbuf_dma_map(r_xprt, rb)) {
+		if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) {
 			rpcrdma_recv_buffer_put(rep);
 			break;
 		}
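
For readers who want the end state rather than the delta: the loop body in rpcrdma_post_recvs() now reads roughly as below. This is a condensed sketch assembled from the new-side hunks above, not a buildable unit; the step that pulls an already-completed rep off rb_recv_bufs under rb_lock is elided, and all surrounding kernel context is assumed.

		/* No rep was waiting on rb_recv_bufs: create one and post it
		 * directly. Per the commit message, it migrates back to
		 * rb_recv_bufs once its Receive completes. */
		if (!rep) {
			rep = rpcrdma_rep_create(r_xprt, temp);
			if (!rep)
				break;
		}

		/* rpcrdma_rep_create() now returns the rep (or NULL) instead
		 * of a bool, so the caller can DMA-map and post it in the
		 * same pass instead of looping around to find it again. */
		if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf)) {
			rpcrdma_recv_buffer_put(rep);
			break;
		}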