author	Simon Derr <simon.derr@bull.net>	2013-06-21 09:32:39 -0400
committer	Eric Van Hensbergen <ericvh@gmail.com>	2013-07-07 23:02:29 -0400
commit	fd453d0ed6c1dacef8eff466df473d62d63db1e9 (patch)
tree	d1858110c400fc20cf403e2798a5a3d1546dcbaf /net
parent	47229ff85e5a0b0613df2288d212938aeb9687da (diff)
9P/RDMA: Use a semaphore to protect the RQ
The current code keeps track of the number of buffers posted in the RQ,
and will prevent it from overflowing. But it does so by simply dropping
post requests (and leaking memory in the process). When this happens
there will actually be too few buffers posted, and soon the 9P server
will complain about 'RNR retry counter exceeded' errors.

Instead, use a semaphore, and block until the RQ is ready for another
buffer to be posted.

Signed-off-by: Simon Derr <simon.derr@bull.net>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
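In short, the fix turns the receive queue's slot accounting into a counting semaphore used as a credit counter: the semaphore starts at the queue depth, posting a receive buffer takes a credit (down_interruptible()), and the receive-completion handler returns one (up()), so a poster now blocks instead of dropping the request. Below is a minimal standalone sketch of the same pattern using POSIX semaphores in userspace; RQ_DEPTH, rq_post() and rq_complete() are illustrative names and are not part of this patch.

/* build: cc -o rq_sketch rq_sketch.c -pthread */
#include <semaphore.h>
#include <stdio.h>

#define RQ_DEPTH 4	/* stand-in for rdma->rq_depth */

static sem_t rq_sem;	/* counts free RQ slots, starts at RQ_DEPTH */

/* Post one receive buffer: block until a slot is free. */
static int rq_post(int id)
{
	if (sem_wait(&rq_sem))		/* analogous to down_interruptible() */
		return -1;
	printf("posted buffer %d\n", id);
	return 0;
}

/* Completion handler: the buffer was consumed, so release its slot. */
static void rq_complete(int id)
{
	printf("completed buffer %d\n", id);
	sem_post(&rq_sem);		/* analogous to up(&rdma->rq_sem) */
}

int main(void)
{
	sem_init(&rq_sem, 0, RQ_DEPTH);	/* like sema_init(&rq_sem, rq_depth) */

	for (int i = 0; i < RQ_DEPTH; i++)
		rq_post(i);		/* fills every slot without blocking */

	rq_complete(0);			/* a completion frees a slot... */
	rq_post(RQ_DEPTH);		/* ...so this post does not block */

	sem_destroy(&rq_sem);
	return 0;
}

Note that the patch uses down_interruptible() rather than down(), so a task waiting for a free slot can still be interrupted by a signal; the FIXME in the diff records that the error path should eventually return -EINTR in that case.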
Diffstat (limited to 'net')
-rw-r--r--	net/9p/trans_rdma.c	22
1 file changed, 12 insertions(+), 10 deletions(-)
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 274a9c1d3c3d..ad8dc331574b 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -73,7 +73,7 @@
  * @sq_depth: The depth of the Send Queue
  * @sq_sem: Semaphore for the SQ
  * @rq_depth: The depth of the Receive Queue.
- * @rq_count: Count of requests in the Receive Queue.
+ * @rq_sem: Semaphore for the RQ
  * @addr: The remote peer's address
  * @req_lock: Protects the active request list
  * @cm_done: Completion event for connection management tracking
@@ -98,7 +98,7 @@ struct p9_trans_rdma {
 	int sq_depth;
 	struct semaphore sq_sem;
 	int rq_depth;
-	atomic_t rq_count;
+	struct semaphore rq_sem;
 	struct sockaddr_in addr;
 	spinlock_t req_lock;
 
@@ -341,8 +341,8 @@ static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
 
 	switch (c->wc_op) {
 	case IB_WC_RECV:
-		atomic_dec(&rdma->rq_count);
 		handle_recv(client, rdma, c, wc.status, wc.byte_len);
+		up(&rdma->rq_sem);
 		break;
 
 	case IB_WC_SEND:
@@ -441,12 +441,14 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 	 * outstanding request, so we must keep a count to avoid
 	 * overflowing the RQ.
 	 */
-	if (atomic_inc_return(&rdma->rq_count) <= rdma->rq_depth) {
-		err = post_recv(client, rpl_context);
-		if (err)
-			goto err_free1;
-	} else
-		atomic_dec(&rdma->rq_count);
+	if (down_interruptible(&rdma->rq_sem))
+		goto error; /* FIXME : -EINTR instead */
+
+	err = post_recv(client, rpl_context);
+	if (err) {
+		p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
+		goto err_free1;
+	}
 
 	/* remove posted receive buffer from request structure */
 	req->rc = NULL;
@@ -537,7 +539,7 @@ static struct p9_trans_rdma *alloc_rdma(struct p9_rdma_opts *opts)
 	spin_lock_init(&rdma->req_lock);
 	init_completion(&rdma->cm_done);
 	sema_init(&rdma->sq_sem, rdma->sq_depth);
-	atomic_set(&rdma->rq_count, 0);
+	sema_init(&rdma->rq_sem, rdma->rq_depth);
 
 	return rdma;
 }