author     Chuck Lever <chuck.lever@oracle.com>           2017-10-20 10:48:45 -0400
committer  Anna Schumaker <Anna.Schumaker@Netapp.com>     2017-11-17 13:47:58 -0500
commit     6f0afc28257dfa769c210f8f8da0f21d77e7452f (patch)
tree       3b448d86a0ddc63451a42e469a95e9378ced93d4 /net/sunrpc
parent     01bb35c89d90abe6fd1c0be001f84bbdfa7fa7d1 (diff)
xprtrdma: Remove atomic send completion counting
The sendctx circular queue now guarantees that xprtrdma cannot
overflow the Send Queue, so remove the remaining bits of the
original Send WQE counting mechanism.

Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
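For readers unfamiliar with the sendctx scheme this patch relies on, the guarantee can be modeled in a few lines of plain C: a Send may only be posted while a context is available from a fixed-size circular queue, so the number of in-flight Send WRs is bounded by the queue size and the Send Queue can be provisioned to match. The sketch below is an illustrative userspace model, not xprtrdma code; the names sendctx_ring, ring_get, and ring_put are hypothetical.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of a bounded circular queue of send contexts.
 * Because a context must be acquired before a Send is posted, the
 * number of Sends in flight can never exceed RING_SIZE, so a Send
 * Queue provisioned with at least RING_SIZE entries cannot overflow.
 */
#define RING_SIZE 8

struct sendctx_ring {
        unsigned int head;      /* next free slot to hand out */
        unsigned int tail;      /* oldest slot still in flight */
};

/* Acquire a send context; returns false when all are in flight. */
static bool ring_get(struct sendctx_ring *r)
{
        if (r->head - r->tail == RING_SIZE)
                return false;   /* caller must wait for a Send completion */
        r->head++;
        return true;
}

/* A signaled Send completion retires every context up to and including
 * the signaled one, releasing that many Send Queue entries at once.
 */
static void ring_put(struct sendctx_ring *r, unsigned int completed)
{
        r->tail += completed;
}

int main(void)
{
        struct sendctx_ring ring = { 0, 0 };
        unsigned int posted = 0;

        /* Try to post 12 Sends: only RING_SIZE fit before a completion. */
        for (int i = 0; i < 12; i++)
                if (ring_get(&ring))
                        posted++;
        printf("posted before completion: %u (bounded by %d)\n",
               posted, RING_SIZE);

        ring_put(&ring, 3);     /* one signaled completion retires a batch */
        printf("can post again: %s\n", ring_get(&ring) ? "yes" : "no");
        return 0;
}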
Diffstat (limited to 'net/sunrpc')
-rw-r--r--   net/sunrpc/xprtrdma/frwr_ops.c    8
-rw-r--r--   net/sunrpc/xprtrdma/verbs.c       4
-rw-r--r--   net/sunrpc/xprtrdma/xprt_rdma.h  21
3 files changed, 0 insertions, 33 deletions
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 3053fb0f5cb3..404166ac958f 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -419,7 +419,6 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
                           IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                           IB_ACCESS_REMOTE_READ;
 
-       rpcrdma_set_signaled(&r_xprt->rx_ep, &reg_wr->wr);
        rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
        if (rc)
                goto out_senderr;
@@ -507,12 +506,6 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
        f->fr_cqe.done = frwr_wc_localinv_wake;
        reinit_completion(&f->fr_linv_done);
 
-       /* Initialize CQ count, since there is always a signaled
-        * WR being posted here. The new cqcount depends on how
-        * many SQEs are about to be consumed.
-        */
-       rpcrdma_init_cqcount(&r_xprt->rx_ep, count);
-
        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless ri_id->qp is a valid pointer.
@@ -545,7 +538,6 @@ reset_mrs:
        /* Find and reset the MRs in the LOCAL_INV WRs that did not
         * get posted.
         */
-       rpcrdma_init_cqcount(&r_xprt->rx_ep, -count);
        while (bad_wr) {
                f = container_of(bad_wr, struct rpcrdma_frmr,
                                 fr_invwr);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 9a824fe8ffc2..22128a81da63 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -553,10 +553,6 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
        ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
                                   cdata->max_requests >> 2);
        ep->rep_send_count = ep->rep_send_batch;
-       ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
-       if (ep->rep_cqinit <= 2)
-               ep->rep_cqinit = 0;     /* always signal? */
-       rpcrdma_init_cqcount(ep, 0);
        init_waitqueue_head(&ep->rep_connect_wait);
        INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);
 
diff --git a/net/sunrpc/xprtrdma/xprt_rdma.h b/net/sunrpc/xprtrdma/xprt_rdma.h
index bccd5d8b9384..6e64c8259d34 100644
--- a/net/sunrpc/xprtrdma/xprt_rdma.h
+++ b/net/sunrpc/xprtrdma/xprt_rdma.h
@@ -95,8 +95,6 @@ enum {
 struct rpcrdma_ep {
        unsigned int            rep_send_count;
        unsigned int            rep_send_batch;
-       atomic_t                rep_cqcount;
-       int                     rep_cqinit;
        int                     rep_connected;
        struct ib_qp_init_attr  rep_attr;
        wait_queue_head_t       rep_connect_wait;
@@ -106,25 +104,6 @@ struct rpcrdma_ep {
        struct delayed_work     rep_connect_worker;
 };
 
-static inline void
-rpcrdma_init_cqcount(struct rpcrdma_ep *ep, int count)
-{
-       atomic_set(&ep->rep_cqcount, ep->rep_cqinit - count);
-}
-
-/* To update send queue accounting, provider must take a
- * send completion every now and then.
- */
-static inline void
-rpcrdma_set_signaled(struct rpcrdma_ep *ep, struct ib_send_wr *send_wr)
-{
-       send_wr->send_flags = 0;
-       if (unlikely(atomic_sub_return(1, &ep->rep_cqcount) <= 0)) {
-               rpcrdma_init_cqcount(ep, 0);
-               send_wr->send_flags = IB_SEND_SIGNALED;
-       }
-}
-
 /* Pre-allocate extra Work Requests for handling backward receives
  * and sends. This is a fixed value because the Work Queues are
  * allocated when the forward channel is set up.
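
With the atomic rep_cqcount counter gone, Send signaling is driven by the rep_send_count and rep_send_batch fields that rpcrdma_ep_create still initializes above. The following is only a hedged sketch of how such batch-based signaling might look, using stand-in types (model_ep, model_send_wr, SIGNALED_FLAG) rather than the real kernel structures; it is not the actual xprtrdma posting path.

#include <stdio.h>

/* Hypothetical stand-ins for the kernel structures involved. */
#define SIGNALED_FLAG   0x1

struct model_send_wr {
        unsigned int send_flags;
};

struct model_ep {
        unsigned int send_count;        /* Sends left before next signal */
        unsigned int send_batch;        /* counter reset value */
};

/* Request a signaled completion only when the per-batch countdown
 * reaches zero, then reload the counter. One completion retires the
 * whole batch's Send Queue entries, which is why per-WR atomic
 * counting is no longer needed.
 */
static void prepare_send(struct model_ep *ep, struct model_send_wr *wr)
{
        if (!ep->send_count) {
                wr->send_flags |= SIGNALED_FLAG;
                ep->send_count = ep->send_batch;
        } else {
                wr->send_flags &= ~SIGNALED_FLAG;
                ep->send_count--;
        }
}

int main(void)
{
        struct model_ep ep = { .send_count = 4, .send_batch = 4 };

        for (int i = 0; i < 10; i++) {
                struct model_send_wr wr = { 0 };

                prepare_send(&ep, &wr);
                printf("send %d: %s\n", i,
                       (wr.send_flags & SIGNALED_FLAG) ? "signaled" : "unsignaled");
        }
        return 0;
}

Under this model, roughly one Send in every rep_send_batch is signaled, so completion overhead stays low while the sendctx queue bound keeps the Send Queue from overflowing between signals.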