-rw-r--r--   drivers/infiniband/hw/cxgb3/cxio_hal.c       | 12
-rw-r--r--   drivers/infiniband/hw/mthca/mthca_mr.c       |  4
-rw-r--r--   drivers/infiniband/ulp/iser/iser_initiator.c | 17
3 files changed, 19 insertions(+), 14 deletions(-)
diff --git a/drivers/infiniband/hw/cxgb3/cxio_hal.c b/drivers/infiniband/hw/cxgb3/cxio_hal.c
index 818cf1aee8c7..f5e9aeec6f6e 100644
--- a/drivers/infiniband/hw/cxgb3/cxio_hal.c
+++ b/drivers/infiniband/hw/cxgb3/cxio_hal.c
@@ -498,9 +498,9 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
         u64 sge_cmd, ctx0, ctx1;
         u64 base_addr;
         struct t3_modify_qp_wr *wqe;
-        struct sk_buff *skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
-
+        struct sk_buff *skb;
 
+        skb = alloc_skb(sizeof(*wqe), GFP_KERNEL);
         if (!skb) {
                 PDBG("%s alloc_skb failed\n", __FUNCTION__);
                 return -ENOMEM;
@@ -508,7 +508,7 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
         err = cxio_hal_init_ctrl_cq(rdev_p);
         if (err) {
                 PDBG("%s err %d initializing ctrl_cq\n", __FUNCTION__, err);
-                return err;
+                goto err;
         }
         rdev_p->ctrl_qp.workq = dma_alloc_coherent(
                                         &(rdev_p->rnic_info.pdev->dev),
@@ -518,7 +518,8 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
                                         GFP_KERNEL);
         if (!rdev_p->ctrl_qp.workq) {
                 PDBG("%s dma_alloc_coherent failed\n", __FUNCTION__);
-                return -ENOMEM;
+                err = -ENOMEM;
+                goto err;
         }
         pci_unmap_addr_set(&rdev_p->ctrl_qp, mapping,
                            rdev_p->ctrl_qp.dma_addr);
@@ -556,6 +557,9 @@ static int cxio_hal_init_ctrl_qp(struct cxio_rdev *rdev_p)
              rdev_p->ctrl_qp.workq, 1 << T3_CTRL_QP_SIZE_LOG2);
         skb->priority = CPL_PRIORITY_CONTROL;
         return (cxgb3_ofld_send(rdev_p->t3cdev_p, skb));
+err:
+        kfree_skb(skb);
+        return err;
 }
 
 static int cxio_hal_destroy_ctrl_qp(struct cxio_rdev *rdev_p)
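The cxgb3 hunks above convert the two later failure paths in cxio_hal_init_ctrl_qp() from bare returns, which leaked the skb allocated at the top of the function, into jumps to a single err: label that calls kfree_skb() before returning. What follows is a minimal user-space sketch of that goto-unwind idiom, not the driver code; setup_thing(), step_one() and step_two() are made-up stand-ins, and malloc() plays the role of alloc_skb().

#include <errno.h>
#include <stdlib.h>

/* Made-up stand-ins for cxio_hal_init_ctrl_cq() and dma_alloc_coherent(). */
static int step_one(void)   { return 0; }
static void *step_two(void) { return malloc(64); }

static int setup_thing(void)
{
        int err;
        void *workq;
        void *buf = malloc(128);        /* plays the role of alloc_skb() */

        if (!buf)
                return -ENOMEM;         /* nothing allocated yet, plain return is fine */

        err = step_one();
        if (err)
                goto err;               /* a bare "return err" here would leak buf */

        workq = step_two();
        if (!workq) {
                err = -ENOMEM;
                goto err;               /* likewise for a bare "return -ENOMEM" */
        }

        /* success path: the real driver keeps both allocations alive */
        free(workq);
        free(buf);
        return 0;

err:
        free(buf);                      /* single cleanup point, like kfree_skb(skb) */
        return err;
}

int main(void)
{
        return setup_thing() ? 1 : 0;
}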
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 8e4846b5c641..fdb576dcfaa8 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -881,8 +881,8 @@ int mthca_init_mr_table(struct mthca_dev *dev)
                 }
                 mpts = mtts = 1 << i;
         } else {
-                mpts = dev->limits.num_mtt_segs;
-                mtts = dev->limits.num_mpts;
+                mtts = dev->limits.num_mtt_segs;
+                mpts = dev->limits.num_mpts;
         }
 
         if (!mthca_is_memfree(dev) &&
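The mthca hunk fixes a crosswise assignment in the default (non-profile) branch of mthca_init_mr_table(): the MTT count now comes from dev->limits.num_mtt_segs and the MPT count from dev->limits.num_mpts, rather than the other way around. A stand-alone illustration of the corrected mapping is below; struct fake_limits and the numbers in it are invented for the example and are not the real struct mthca_limits.

#include <stdio.h>

/* Invented stand-in for the two struct mthca_limits fields involved. */
struct fake_limits {
        int num_mpts;           /* memory protection table (MPT) entries */
        int num_mtt_segs;       /* memory translation table (MTT) segments */
};

int main(void)
{
        struct fake_limits limits = { .num_mpts = 1 << 17, .num_mtt_segs = 1 << 20 };
        int mpts, mtts;

        /* Corrected mapping from the patch: each table is sized from its
         * own limit rather than from the other table's limit. */
        mtts = limits.num_mtt_segs;
        mpts = limits.num_mpts;

        printf("MPT entries %d, MTT segments %d\n", mpts, mtts);
        return 0;
}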
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 89e37283c836..278fcbccc2d9 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -658,6 +658,7 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
 {
         int deferred;
         int is_rdma_aligned = 1;
+        struct iser_regd_buf *regd;
 
         /* if we were reading, copy back to unaligned sglist,
          * anyway dma_unmap and free the copy
@@ -672,20 +673,20 @@ void iser_ctask_rdma_finalize(struct iscsi_iser_cmd_task *iser_ctask)
         }
 
         if (iser_ctask->dir[ISER_DIR_IN]) {
-                deferred = iser_regd_buff_release
-                        (&iser_ctask->rdma_regd[ISER_DIR_IN]);
+                regd = &iser_ctask->rdma_regd[ISER_DIR_IN];
+                deferred = iser_regd_buff_release(regd);
                 if (deferred) {
-                        iser_err("References remain for BUF-IN rdma reg\n");
-                        BUG();
+                        iser_err("%d references remain for BUF-IN rdma reg\n",
+                                 atomic_read(&regd->ref_count));
                 }
         }
 
         if (iser_ctask->dir[ISER_DIR_OUT]) {
-                deferred = iser_regd_buff_release
-                        (&iser_ctask->rdma_regd[ISER_DIR_OUT]);
+                regd = &iser_ctask->rdma_regd[ISER_DIR_OUT];
+                deferred = iser_regd_buff_release(regd);
                 if (deferred) {
-                        iser_err("References remain for BUF-OUT rdma reg\n");
-                        BUG();
+                        iser_err("%d references remain for BUF-OUT rdma reg\n",
+                                 atomic_read(&regd->ref_count));
                 }
         }
 
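The iser change makes a leftover reference on a registered buffer a logged condition rather than a fatal one: iser_ctask_rdma_finalize() now caches the struct iser_regd_buf pointer, and when iser_regd_buff_release() reports a deferred release it prints the remaining count via atomic_read(&regd->ref_count) instead of calling BUG(). The user-space sketch below mirrors that behavior; struct regd_buf, buf_release() and the two sample buffers are invented for illustration and are not the iSER data structures.

#include <stdio.h>
#include <stdatomic.h>

/* Invented analogue of struct iser_regd_buf: a buffer whose release may be
 * deferred while other users still hold references. */
struct regd_buf {
        atomic_int ref_count;
        const char *name;
};

/* Drop one reference; return nonzero if the buffer could not be freed yet
 * because references remain (mirrors the "deferred" return value). */
static int buf_release(struct regd_buf *regd)
{
        if (atomic_fetch_sub(&regd->ref_count, 1) == 1) {
                /* last reference: the real code would unmap and free here */
                return 0;
        }
        return 1;       /* deferred: someone else still holds a reference */
}

int main(void)
{
        struct regd_buf in  = { .name = "BUF-IN"  };
        struct regd_buf out = { .name = "BUF-OUT" };

        atomic_store(&in.ref_count, 1);         /* only our reference */
        atomic_store(&out.ref_count, 2);        /* another holder remains */

        struct regd_buf *bufs[] = { &in, &out };
        for (int i = 0; i < 2; i++) {
                if (buf_release(bufs[i])) {
                        /* Log and carry on, rather than BUG()/abort(). */
                        fprintf(stderr, "%d references remain for %s rdma reg\n",
                                atomic_load(&bufs[i]->ref_count), bufs[i]->name);
                }
        }
        return 0;
}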