author    Christoph Hellwig <hch@lst.de>  2016-03-03 03:36:06 -0500
committer Doug Ledford <dledford@redhat.com>  2016-03-10 20:54:09 -0500
commit    7cf20fc62428367bbf853a8d968804a6ec6a4973 (patch)
tree      a57192d4f43eae28c41d2295e663f2c66566f83e
parent    fc77dbd34c5c99bce46d40a2491937c3bcbd10af (diff)
net/9p: convert to new CQ API
Trivial conversion to the new RDMA CQ API.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Dominique Martinet <dominique.martinet@cea.fr>
Signed-off-by: Doug Ledford <dledford@redhat.com>
-rw-r--r--  net/9p/trans_rdma.c  86
1 file changed, 31 insertions(+), 55 deletions(-)
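For context, the "new CQ API" referred to above is the completion-queue abstraction in drivers/infiniband/core/cq.c: instead of stashing a context pointer in wr.wr_id and open-coding a poll loop, a consumer embeds a struct ib_cqe in its per-WR context, sets its .done callback, and points the work request's wr_cqe at it; the IB core then polls the CQ and invokes the callback directly. A minimal sketch of the consumer pattern follows; the names my_context, my_done, and my_post_recv are illustrative, not part of this patch:

#include <linux/slab.h>		/* kfree */
#include <rdma/ib_verbs.h>	/* struct ib_cqe, ib_post_recv, ... */

struct my_context {
	struct ib_cqe cqe;	/* embedded completion entry */
	dma_addr_t busa;	/* per-WR state to release on completion */
};

/* Called by the IB core for each completion; no poll loop needed. */
static void my_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct my_context *c =
		container_of(wc->wr_cqe, struct my_context, cqe);

	if (wc->status != IB_WC_SUCCESS)
		pr_err("completion error: %d\n", wc->status);
	kfree(c);
}

static int my_post_recv(struct ib_qp *qp, struct my_context *c,
			struct ib_sge *sge)
{
	struct ib_recv_wr wr, *bad_wr;

	c->cqe.done = my_done;		/* dispatched via wc->wr_cqe */
	wr.next = NULL;
	wr.wr_cqe = &c->cqe;		/* replaces the old wr.wr_id cast */
	wr.sg_list = sge;
	wr.num_sge = 1;
	return ib_post_recv(qp, &wr, &bad_wr);
}

The diff below applies exactly this pattern to the 9p RDMA transport.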
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 52b4a2f993f2..1852e383afd6 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -109,14 +109,13 @@ struct p9_trans_rdma {
 /**
  * p9_rdma_context - Keeps track of in-process WR
  *
- * @wc_op: The original WR op for when the CQE completes in error.
  * @busa: Bus address to unmap when the WR completes
  * @req: Keeps track of requests (send)
  * @rc: Keepts track of replies (receive)
  */
 struct p9_rdma_req;
 struct p9_rdma_context {
-	enum ib_wc_opcode wc_op;
+	struct ib_cqe cqe;
 	dma_addr_t busa;
 	union {
 		struct p9_req_t *req;
@@ -284,9 +283,12 @@ p9_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
 }
 
 static void
-handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
-	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+recv_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+	struct p9_client *client = cq->cq_context;
+	struct p9_trans_rdma *rdma = client->trans;
+	struct p9_rdma_context *c =
+		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
 	struct p9_req_t *req;
 	int err = 0;
 	int16_t tag;
@@ -295,7 +297,7 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
 	ib_dma_unmap_single(rdma->cm_id->device, c->busa, client->msize,
 			 DMA_FROM_DEVICE);
 
-	if (status != IB_WC_SUCCESS)
+	if (wc->status != IB_WC_SUCCESS)
 		goto err_out;
 
 	err = p9_parse_header(c->rc, NULL, NULL, &tag, 1);
@@ -316,21 +318,32 @@ handle_recv(struct p9_client *client, struct p9_trans_rdma *rdma,
 	req->rc = c->rc;
 	p9_client_cb(client, req, REQ_STATUS_RCVD);
 
+ out:
+	up(&rdma->rq_sem);
+	kfree(c);
 	return;
 
  err_out:
-	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n", req, err, status);
+	p9_debug(P9_DEBUG_ERROR, "req %p err %d status %d\n",
+			req, err, wc->status);
 	rdma->state = P9_RDMA_FLUSHING;
 	client->status = Disconnected;
+	goto out;
 }
 
 static void
-handle_send(struct p9_client *client, struct p9_trans_rdma *rdma,
-	    struct p9_rdma_context *c, enum ib_wc_status status, u32 byte_len)
+send_done(struct ib_cq *cq, struct ib_wc *wc)
 {
+	struct p9_client *client = cq->cq_context;
+	struct p9_trans_rdma *rdma = client->trans;
+	struct p9_rdma_context *c =
+		container_of(wc->wr_cqe, struct p9_rdma_context, cqe);
+
 	ib_dma_unmap_single(rdma->cm_id->device,
 			    c->busa, c->req->tc->size,
 			    DMA_TO_DEVICE);
+	up(&rdma->sq_sem);
+	kfree(c);
 }
 
 static void qp_event_handler(struct ib_event *event, void *context)
@@ -339,42 +352,6 @@ static void qp_event_handler(struct ib_event *event, void *context)
 		 event->event, context);
 }
 
-static void cq_comp_handler(struct ib_cq *cq, void *cq_context)
-{
-	struct p9_client *client = cq_context;
-	struct p9_trans_rdma *rdma = client->trans;
-	int ret;
-	struct ib_wc wc;
-
-	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
-	while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
-		struct p9_rdma_context *c = (void *) (unsigned long) wc.wr_id;
-
-		switch (c->wc_op) {
-		case IB_WC_RECV:
-			handle_recv(client, rdma, c, wc.status, wc.byte_len);
-			up(&rdma->rq_sem);
-			break;
-
-		case IB_WC_SEND:
-			handle_send(client, rdma, c, wc.status, wc.byte_len);
-			up(&rdma->sq_sem);
-			break;
-
-		default:
-			pr_err("unexpected completion type, c->wc_op=%d, wc.opcode=%d, status=%d\n",
-			       c->wc_op, wc.opcode, wc.status);
-			break;
-		}
-		kfree(c);
-	}
-}
-
-static void cq_event_handler(struct ib_event *e, void *v)
-{
-	p9_debug(P9_DEBUG_ERROR, "CQ event %d context %p\n", e->event, v);
-}
-
 static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
 {
 	if (!rdma)
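The cq_comp_handler deleted above is exactly the boilerplate the new API centralizes: the IB core now owns the notify/poll loop and dispatches each completion through its ib_cqe, so per-consumer opcode switches become unnecessary. Schematically, as a simplified sketch (the real loop in drivers/infiniband/core/cq.c batches work completions and handles rearming):

	struct ib_wc wc;

	while (ib_poll_cq(cq, 1, &wc) > 0)
		wc.wr_cqe->done(cq, &wc);	/* e.g. recv_done or send_done */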
@@ -387,7 +364,7 @@ static void rdma_destroy_trans(struct p9_trans_rdma *rdma)
 	ib_dealloc_pd(rdma->pd);
 
 	if (rdma->cq && !IS_ERR(rdma->cq))
-		ib_destroy_cq(rdma->cq);
+		ib_free_cq(rdma->cq);
 
 	if (rdma->cm_id && !IS_ERR(rdma->cm_id))
 		rdma_destroy_id(rdma->cm_id);
@@ -408,13 +385,14 @@ post_recv(struct p9_client *client, struct p9_rdma_context *c)
 	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
 		goto error;
 
+	c->cqe.done = recv_done;
+
 	sge.addr = c->busa;
 	sge.length = client->msize;
 	sge.lkey = rdma->pd->local_dma_lkey;
 
 	wr.next = NULL;
-	c->wc_op = IB_WC_RECV;
-	wr.wr_id = (unsigned long) c;
+	wr.wr_cqe = &c->cqe;
 	wr.sg_list = &sge;
 	wr.num_sge = 1;
 	return ib_post_recv(rdma->qp, &wr, &bad_wr);
@@ -499,13 +477,14 @@ dont_need_post_recv:
 		goto send_error;
 	}
 
+	c->cqe.done = send_done;
+
 	sge.addr = c->busa;
 	sge.length = c->req->tc->size;
 	sge.lkey = rdma->pd->local_dma_lkey;
 
 	wr.next = NULL;
-	c->wc_op = IB_WC_SEND;
-	wr.wr_id = (unsigned long) c;
+	wr.wr_cqe = &c->cqe;
 	wr.opcode = IB_WR_SEND;
 	wr.send_flags = IB_SEND_SIGNALED;
 	wr.sg_list = &sge;
@@ -642,7 +621,6 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
 	struct p9_trans_rdma *rdma;
 	struct rdma_conn_param conn_param;
 	struct ib_qp_init_attr qp_attr;
-	struct ib_cq_init_attr cq_attr = {};
 
 	/* Parse the transport specific mount options */
 	err = parse_opts(args, &opts);
@@ -695,13 +673,11 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args)
 		goto error;
 
 	/* Create the Completion Queue */
-	cq_attr.cqe = opts.sq_depth + opts.rq_depth + 1;
-	rdma->cq = ib_create_cq(rdma->cm_id->device, cq_comp_handler,
-				cq_event_handler, client,
-				&cq_attr);
+	rdma->cq = ib_alloc_cq(rdma->cm_id->device, client,
+			opts.sq_depth + opts.rq_depth + 1,
+			0, IB_POLL_SOFTIRQ);
 	if (IS_ERR(rdma->cq))
 		goto error;
-	ib_req_notify_cq(rdma->cq, IB_CQ_NEXT_COMP);
 
 	/* Create the Protection Domain */
 	rdma->pd = ib_alloc_pd(rdma->cm_id->device);
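A note on the allocation call above: ib_alloc_cq takes the device, a private context (retrievable as cq->cq_context in the handlers), the number of CQEs, a completion vector, and an ib_poll_context. IB_POLL_SOFTIRQ keeps completion processing in softirq context, closest to the old event-driven callbacks, and the handlers here do not need to sleep. If they did, the same call with IB_POLL_WORKQUEUE would move dispatch into process context. A hypothetical alternative, not what this patch chooses:

	rdma->cq = ib_alloc_cq(rdma->cm_id->device, client,
			opts.sq_depth + opts.rq_depth + 1,
			0, IB_POLL_WORKQUEUE);

Either way, the CQ is released with ib_free_cq, as rdma_destroy_trans now does.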