author	Simon Derr <simon.derr@bull.net>	2013-06-21 09:32:41 -0400
committer	Eric Van Hensbergen <ericvh@gmail.com>	2013-07-07 23:02:29 -0400
commit	2f52d07cb75d96fcbb5b9ab72938590fa9ffb19d (patch)
tree	ec7258ce3685ab86bbea93f8b5dd666fcc6da98b /net/9p
parent	b530e252e291c27fdcb1b73c72ad17f75c8bdba6 (diff)
9P/RDMA: Improve error handling in rdma_request
Most importantly:
- do not free the recv context (rpl_context) after a successful post_recv()
- but do free the send context (c) after a failed send.

Signed-off-by: Simon Derr <simon.derr@bull.net>
Signed-off-by: Eric Van Hensbergen <ericvh@gmail.com>
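The fix comes down to an ownership rule: once post_recv() has accepted the reply context, the receive completion path owns it and the error paths must no longer free it, while the send context is still owned locally and has to be freed whenever the send fails. Below is a minimal, self-contained sketch of that two-label goto unwind, with plain malloc/free and stub post functions standing in for the driver's kernel allocations and verbs calls; everything here is illustrative except the send_error/recv_error split itself, and it omits the real function's extra step of pushing the transport toward P9_RDMA_CLOSING on a receive-side failure (visible in the diff below).

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx { int unused; };                 /* stand-in for the 9P RDMA contexts */

static int post_recv_stub(struct ctx *rpl) { (void)rpl; return 0;    } /* pretend the recv posts fine */
static int post_send_stub(struct ctx *c)   { (void)c;   return -EIO; } /* pretend the send fails      */

static int request_sketch(void)
{
        struct ctx *rpl_context, *c;
        int err;

        rpl_context = malloc(sizeof(*rpl_context));
        if (!rpl_context) {
                err = -ENOMEM;
                goto recv_error;
        }

        err = post_recv_stub(rpl_context);
        if (err)
                goto recv_error;
        /* From here on the posted receive owns rpl_context: never free it below. */

        c = malloc(sizeof(*c));
        if (!c) {
                err = -ENOMEM;
                goto send_error;
        }

        err = post_send_stub(c);
        if (err)
                goto send_error;

        return 0;

send_error:
        /* The send context is still ours, so free it. rpl_context is NOT freed
         * here: in the real driver the posted receive's completion handler owns
         * it now (in this standalone sketch it simply leaks). */
        free(c);
        fprintf(stderr, "error %d after recv was posted\n", err);
        return err;

recv_error:
        /* The recv was never posted, so rpl_context is still ours. */
        free(rpl_context);
        fprintf(stderr, "error %d before recv was posted\n", err);
        return err;
}

int main(void)
{
        return request_sketch() ? 1 : 0;
}

The pivotal point is the comment after the (stubbed) post_recv(): past that line, only the send context may be freed on error.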
Diffstat (limited to 'net/9p')
-rw-r--r--	net/9p/trans_rdma.c	44
1 file changed, 28 insertions, 16 deletions
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c
index 1bd4c7150114..926e72d00e57 100644
--- a/net/9p/trans_rdma.c
+++ b/net/9p/trans_rdma.c
@@ -430,7 +430,7 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 	rpl_context = kmalloc(sizeof *rpl_context, GFP_NOFS);
 	if (!rpl_context) {
 		err = -ENOMEM;
-		goto err_close;
+		goto recv_error;
 	}
 	rpl_context->rc = req->rc;
 
@@ -441,13 +441,15 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 	 * outstanding request, so we must keep a count to avoid
 	 * overflowing the RQ.
 	 */
-	if (down_interruptible(&rdma->rq_sem))
-		goto error; /* FIXME : -EINTR instead */
+	if (down_interruptible(&rdma->rq_sem)) {
+		err = -EINTR;
+		goto recv_error;
+	}
 
 	err = post_recv(client, rpl_context);
 	if (err) {
 		p9_debug(P9_DEBUG_FCALL, "POST RECV failed\n");
-		goto err_free;
+		goto recv_error;
 	}
 
 	/* remove posted receive buffer from request structure */
@@ -457,15 +459,17 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 	c = kmalloc(sizeof *c, GFP_NOFS);
 	if (!c) {
 		err = -ENOMEM;
-		goto err_free;
+		goto send_error;
 	}
 	c->req = req;
 
 	c->busa = ib_dma_map_single(rdma->cm_id->device,
 				    c->req->tc->sdata, c->req->tc->size,
 				    DMA_TO_DEVICE);
-	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa))
-		goto error;
+	if (ib_dma_mapping_error(rdma->cm_id->device, c->busa)) {
+		err = -EIO;
+		goto send_error;
+	}
 
 	sge.addr = c->busa;
 	sge.length = c->req->tc->size;
@@ -479,19 +483,27 @@ static int rdma_request(struct p9_client *client, struct p9_req_t *req)
 	wr.sg_list = &sge;
 	wr.num_sge = 1;
 
-	if (down_interruptible(&rdma->sq_sem))
-		goto error;
+	if (down_interruptible(&rdma->sq_sem)) {
+		err = -EINTR;
+		goto send_error;
+	}
 
-	return ib_post_send(rdma->qp, &wr, &bad_wr);
+	err = ib_post_send(rdma->qp, &wr, &bad_wr);
+	if (err)
+		goto send_error;
 
- error:
+	/* Success */
+	return 0;
+
+ /* Handle errors that happened during or while preparing the send: */
+ send_error:
 	kfree(c);
+	p9_debug(P9_DEBUG_ERROR, "Error %d in rdma_request()\n", err);
+	return err;
+
+ /* Handle errors that happened during or while preparing post_recv(): */
+ recv_error:
 	kfree(rpl_context);
-	p9_debug(P9_DEBUG_ERROR, "EIO\n");
-	return -EIO;
- err_free:
-	kfree(rpl_context);
- err_close:
 	spin_lock_irqsave(&rdma->req_lock, flags);
 	if (rdma->state < P9_RDMA_CLOSING) {
 		rdma->state = P9_RDMA_CLOSING;