author    Roland Dreier <roland@eddore.topspincom.com>  2005-08-18 16:39:31 -0400
committer Roland Dreier <rolandd@cisco.com>             2005-08-26 23:37:37 -0400
commit    87b816706bb2b79fbaff8e0b8e279e783273383e (patch)
tree      94e9a87fd5cbf1f069cba4e8a766ef718d18e5e4 /drivers/infiniband/hw/mthca/mthca_qp.c
parent    f520ba5aa48e2891c3fb3e364eeaaab4212c7c45 (diff)
[PATCH] IB/mthca: Factor out common queue alloc code
Clean up the allocation of memory for queues by factoring out the
common code into mthca_buf_alloc() and mthca_buf_free().  Now CQs and
QPs share the same queue allocation code, which we'll also use for
SRQs.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_qp.c')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c  111
1 files changed, 7 insertions, 104 deletions
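
The prototypes of the new helpers are not part of this diff (they land in
mthca_allocator.c, with declarations elsewhere in the driver); the sketch
below is inferred purely from the call sites in the patch, so the parameter
names, the "union mthca_buf" type, and the meaning of the 0 passed as the
seventh argument (presumably an HCA-write flag) are assumptions, not
verbatim declarations:

	/*
	 * Interface sketch inferred from the call sites below; names and
	 * the hca_write flag are guesses, not copied from the tree.
	 * "union mthca_buf" stands in for the direct-buffer/page-list
	 * union that the removed qp->queue code manipulated.
	 */
	int  mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
			     union mthca_buf *buf, int *is_direct,
			     struct mthca_pd *pd, int hca_write,
			     struct mthca_mr *mr);
	void mthca_buf_free(struct mthca_dev *dev, int size,
			    union mthca_buf *buf, int is_direct,
			    struct mthca_mr *mr);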
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index b7e3d2342799..b5a0bef15b7e 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -926,10 +926,6 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 			       struct mthca_qp *qp)
 {
 	int size;
-	int i;
-	int npages, shift;
-	dma_addr_t t;
-	u64 *dma_list = NULL;
 	int err = -ENOMEM;
 
 	size = sizeof (struct mthca_next_seg) +
@@ -979,116 +975,24 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 	if (!qp->wrid)
 		goto err_out;
 
-	if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
-		qp->is_direct = 1;
-		npages = 1;
-		shift = get_order(size) + PAGE_SHIFT;
-
-		if (0)
-			mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
-				  size, shift);
-
-		qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
-							  &t, GFP_KERNEL);
-		if (!qp->queue.direct.buf)
-			goto err_out;
-
-		pci_unmap_addr_set(&qp->queue.direct, mapping, t);
-
-		memset(qp->queue.direct.buf, 0, size);
-
-		while (t & ((1 << shift) - 1)) {
-			--shift;
-			npages *= 2;
-		}
-
-		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-		if (!dma_list)
-			goto err_out_free;
-
-		for (i = 0; i < npages; ++i)
-			dma_list[i] = t + i * (1 << shift);
-	} else {
-		qp->is_direct = 0;
-		npages = size / PAGE_SIZE;
-		shift = PAGE_SHIFT;
-
-		if (0)
-			mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);
-
-		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-		if (!dma_list)
-			goto err_out;
-
-		qp->queue.page_list = kmalloc(npages *
-					      sizeof *qp->queue.page_list,
-					      GFP_KERNEL);
-		if (!qp->queue.page_list)
-			goto err_out;
-
-		for (i = 0; i < npages; ++i) {
-			qp->queue.page_list[i].buf =
-				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
-						   &t, GFP_KERNEL);
-			if (!qp->queue.page_list[i].buf)
-				goto err_out_free;
-
-			memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);
-
-			pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
-			dma_list[i] = t;
-		}
-	}
-
-	err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
-				  npages, 0, size,
-				  MTHCA_MPT_FLAG_LOCAL_READ,
-				  &qp->mr);
+	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
+			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
 	if (err)
-		goto err_out_free;
+		goto err_out;
 
-	kfree(dma_list);
 	return 0;
 
-err_out_free:
-	if (qp->is_direct) {
-		dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
-				  pci_unmap_addr(&qp->queue.direct, mapping));
-	} else
-		for (i = 0; i < npages; ++i) {
-			if (qp->queue.page_list[i].buf)
-				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-						  qp->queue.page_list[i].buf,
-						  pci_unmap_addr(&qp->queue.page_list[i],
-								 mapping));
-
-		}
-
-err_out:
+err_out:
 	kfree(qp->wrid);
-	kfree(dma_list);
 	return err;
 }
 
 static void mthca_free_wqe_buf(struct mthca_dev *dev,
 			       struct mthca_qp *qp)
 {
-	int i;
-	int size = PAGE_ALIGN(qp->send_wqe_offset +
-			      (qp->sq.max << qp->sq.wqe_shift));
-
-	if (qp->is_direct) {
-		dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
-				  pci_unmap_addr(&qp->queue.direct, mapping));
-	} else {
-		for (i = 0; i < size / PAGE_SIZE; ++i) {
-			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-					  qp->queue.page_list[i].buf,
-					  pci_unmap_addr(&qp->queue.page_list[i],
-							 mapping));
-		}
-	}
-
+	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
+				       (qp->sq.max << qp->sq.wqe_shift)),
+		       &qp->queue, qp->is_direct, &qp->mr);
 	kfree(qp->wrid);
 }
 
@@ -1433,7 +1337,6 @@ void mthca_free_qp(struct mthca_dev *dev,
 	if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
 		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);
 
-	mthca_free_mr(dev, &qp->mr);
 	mthca_free_memfree(dev, qp);
 	mthca_free_wqe_buf(dev, qp);
 	}
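
Note that mthca_buf_free() evidently releases the queue's memory region
along with the buffers, which is why the explicit mthca_free_mr() call
disappears from mthca_free_qp() in the last hunk.  With the helpers in
place, the SRQ support promised in the commit message reduces to the same
pattern; a hypothetical caller might look like the sketch below, where
struct mthca_srq, its fields, MTHCA_MAX_DIRECT_SRQ_SIZE, and the wrapper
functions are illustrative assumptions, not code from this tree:

	/* Hypothetical SRQ buffer setup built on the shared helpers;
	 * all names here are assumptions, not taken from this patch. */
	static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd,
				       struct mthca_srq *srq, int size)
	{
		/* One call replaces the direct/indirect allocation, DMA
		 * mapping, and memory-region setup that this patch removed
		 * from the QP path. */
		return mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_SRQ_SIZE,
				       &srq->queue, &srq->is_direct, pd, 1,
				       &srq->mr);
	}

	static void mthca_free_srq_buf(struct mthca_dev *dev,
				       struct mthca_srq *srq, int size)
	{
		/* Frees the MR and the queue memory, direct or paged. */
		mthca_buf_free(dev, size, &srq->queue, srq->is_direct,
			       &srq->mr);
	}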