author    Roland Dreier <roland@eddore.topspincom.com>    2005-08-18 16:39:31 -0400
committer Roland Dreier <rolandd@cisco.com>    2005-08-26 23:37:37 -0400
commit    87b816706bb2b79fbaff8e0b8e279e783273383e (patch)
tree      94e9a87fd5cbf1f069cba4e8a766ef718d18e5e4 /drivers/infiniband
parent    f520ba5aa48e2891c3fb3e364eeaaab4212c7c45 (diff)
[PATCH] IB/mthca: Factor out common queue alloc code
Clean up the allocation of memory for queues by factoring out the
common code into mthca_buf_alloc() and mthca_buf_free().  Now CQs and
QPs share the same queue allocation code, which we'll also use for
SRQs.

Signed-off-by: Roland Dreier <rolandd@cisco.com>
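To make the new interface concrete, here is a minimal sketch, not part
of the patch itself, of how a consumer allocates and frees a queue
after this change; it mirrors the mthca_init_cq() and
mthca_free_cq_buf() call sites in the diffs below and assumes the
usual mthca driver context (struct mthca_dev, struct mthca_cq, the
driver PD).

/* Sketch: queue setup/teardown with the factored-out helpers. */
static int example_alloc_cq_queue(struct mthca_dev *dev,
				  struct mthca_cq *cq, int size)
{
	/* One call replaces ~100 lines of open-coded allocation:
	 * the direct vs. paged layout is chosen internally based on
	 * MTHCA_MAX_DIRECT_CQ_SIZE, and the MR is registered with
	 * local write enabled (hca_write = 1). */
	return mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
			       &cq->queue, &cq->is_direct,
			       &dev->driver_pd, 1, &cq->mr);
}

static void example_free_cq_queue(struct mthca_dev *dev,
				  struct mthca_cq *cq)
{
	/* mthca_buf_free() also frees the MR, so no separate
	 * mthca_free_mr() call is needed any more. */
	mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
		       &cq->queue, cq->is_direct, &cq->mr);
}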
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--    drivers/infiniband/hw/mthca/mthca_allocator.c    116
-rw-r--r--    drivers/infiniband/hw/mthca/mthca_cq.c           118
-rw-r--r--    drivers/infiniband/hw/mthca/mthca_dev.h            5
-rw-r--r--    drivers/infiniband/hw/mthca/mthca_provider.h      15
-rw-r--r--    drivers/infiniband/hw/mthca/mthca_qp.c           111
5 files changed, 141 insertions(+), 224 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_allocator.c b/drivers/infiniband/hw/mthca/mthca_allocator.c
index b1db48dd91d6..9ba3211cef7c 100644
--- a/drivers/infiniband/hw/mthca/mthca_allocator.c
+++ b/drivers/infiniband/hw/mthca/mthca_allocator.c
@@ -177,3 +177,119 @@ void mthca_array_cleanup(struct mthca_array *array, int nent)
 
 	kfree(array->page_list);
 }
+
+/*
+ * Handling for queue buffers -- we allocate a bunch of memory and
+ * register it in a memory region at HCA virtual address 0. If the
+ * requested size is > max_direct, we split the allocation into
+ * multiple pages, so we don't require too much contiguous memory.
+ */
+
+int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
+		    union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
+		    int hca_write, struct mthca_mr *mr)
+{
+	int err = -ENOMEM;
+	int npages, shift;
+	u64 *dma_list = NULL;
+	dma_addr_t t;
+	int i;
+
+	if (size <= max_direct) {
+		*is_direct = 1;
+		npages     = 1;
+		shift      = get_order(size) + PAGE_SHIFT;
+
+		buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev,
+						     size, &t, GFP_KERNEL);
+		if (!buf->direct.buf)
+			return -ENOMEM;
+
+		pci_unmap_addr_set(&buf->direct, mapping, t);
+
+		memset(buf->direct.buf, 0, size);
+
+		while (t & ((1 << shift) - 1)) {
+			--shift;
+			npages *= 2;
+		}
+
+		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+		if (!dma_list)
+			goto err_free;
+
+		for (i = 0; i < npages; ++i)
+			dma_list[i] = t + i * (1 << shift);
+	} else {
+		*is_direct = 0;
+		npages     = (size + PAGE_SIZE - 1) / PAGE_SIZE;
+		shift      = PAGE_SHIFT;
+
+		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
+		if (!dma_list)
+			return -ENOMEM;
+
+		buf->page_list = kmalloc(npages * sizeof *buf->page_list,
+					 GFP_KERNEL);
+		if (!buf->page_list)
+			goto err_out;
+
+		for (i = 0; i < npages; ++i)
+			buf->page_list[i].buf = NULL;
+
+		for (i = 0; i < npages; ++i) {
+			buf->page_list[i].buf =
+				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
+						   &t, GFP_KERNEL);
+			if (!buf->page_list[i].buf)
+				goto err_free;
+
+			dma_list[i] = t;
+			pci_unmap_addr_set(&buf->page_list[i], mapping, t);
+
+			memset(buf->page_list[i].buf, 0, PAGE_SIZE);
+		}
+	}
+
+	err = mthca_mr_alloc_phys(dev, pd->pd_num,
+				  dma_list, shift, npages,
+				  0, size,
+				  MTHCA_MPT_FLAG_LOCAL_READ |
+				  (hca_write ? MTHCA_MPT_FLAG_LOCAL_WRITE : 0),
+				  mr);
+	if (err)
+		goto err_free;
+
+	kfree(dma_list);
+
+	return 0;
+
+err_free:
+	mthca_buf_free(dev, size, buf, *is_direct, NULL);
+
+err_out:
+	kfree(dma_list);
+
+	return err;
+}
+
+void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
+		    int is_direct, struct mthca_mr *mr)
+{
+	int i;
+
+	if (mr)
+		mthca_free_mr(dev, mr);
+
+	if (is_direct)
+		dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
+				  pci_unmap_addr(&buf->direct, mapping));
+	else {
+		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
+			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+					  buf->page_list[i].buf,
+					  pci_unmap_addr(&buf->page_list[i],
+							 mapping));
+		kfree(buf->page_list);
+	}
+}
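A note on the while loop in mthca_buf_alloc() above:
dma_alloc_coherent() returns a contiguous buffer, but its DMA address
need not be aligned to the full buffer size, and mthca_mr_alloc_phys()
describes the region as npages chunks of 2^shift bytes, each aligned
to the chunk size. The loop therefore halves the chunk size (and
doubles the chunk count) until the address is aligned to one chunk.
A standalone user-space demo of that computation, using a hypothetical
DMA address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t t = 0x12346000;	/* hypothetical DMA address */
	int shift  = 16;		/* buffer is 2^16 = 64 KB */
	int npages = 1;

	/* Same loop as mthca_buf_alloc(): shrink the chunk until
	 * the address is chunk-aligned. */
	while (t & ((1ULL << shift) - 1)) {
		--shift;
		npages *= 2;
	}

	/* 0x12346000 is 8 KB-aligned but not 16 KB-aligned, so this
	 * prints: shift = 13, npages = 8 -- eight 8 KB chunks
	 * covering the 64 KB buffer. */
	printf("shift = %d, npages = %d\n", shift, npages);
	return 0;
}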
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 907867d1f2e0..8afb9ee2fbc6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -639,113 +639,8 @@ int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify)
 
 static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq)
 {
-	int i;
-	int size;
-
-	if (cq->is_direct)
-		dma_free_coherent(&dev->pdev->dev,
-				  (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
-				  cq->queue.direct.buf,
-				  pci_unmap_addr(&cq->queue.direct,
-						 mapping));
-	else {
-		size = (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE;
-		for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i)
-			if (cq->queue.page_list[i].buf)
-				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-						  cq->queue.page_list[i].buf,
-						  pci_unmap_addr(&cq->queue.page_list[i],
-								 mapping));
-
-		kfree(cq->queue.page_list);
-	}
-}
-
-static int mthca_alloc_cq_buf(struct mthca_dev *dev, int size,
-			      struct mthca_cq *cq)
-{
-	int err = -ENOMEM;
-	int npages, shift;
-	u64 *dma_list = NULL;
-	dma_addr_t t;
-	int i;
-
-	if (size <= MTHCA_MAX_DIRECT_CQ_SIZE) {
-		cq->is_direct = 1;
-		npages        = 1;
-		shift         = get_order(size) + PAGE_SHIFT;
-
-		cq->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev,
-							  size, &t, GFP_KERNEL);
-		if (!cq->queue.direct.buf)
-			return -ENOMEM;
-
-		pci_unmap_addr_set(&cq->queue.direct, mapping, t);
-
-		memset(cq->queue.direct.buf, 0, size);
-
-		while (t & ((1 << shift) - 1)) {
-			--shift;
-			npages *= 2;
-		}
-
-		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-		if (!dma_list)
-			goto err_free;
-
-		for (i = 0; i < npages; ++i)
-			dma_list[i] = t + i * (1 << shift);
-	} else {
-		cq->is_direct = 0;
-		npages        = (size + PAGE_SIZE - 1) / PAGE_SIZE;
-		shift         = PAGE_SHIFT;
-
-		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-		if (!dma_list)
-			return -ENOMEM;
-
-		cq->queue.page_list = kmalloc(npages * sizeof *cq->queue.page_list,
-					      GFP_KERNEL);
-		if (!cq->queue.page_list)
-			goto err_out;
-
-		for (i = 0; i < npages; ++i)
-			cq->queue.page_list[i].buf = NULL;
-
-		for (i = 0; i < npages; ++i) {
-			cq->queue.page_list[i].buf =
-				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
-						   &t, GFP_KERNEL);
-			if (!cq->queue.page_list[i].buf)
-				goto err_free;
-
-			dma_list[i] = t;
-			pci_unmap_addr_set(&cq->queue.page_list[i], mapping, t);
-
-			memset(cq->queue.page_list[i].buf, 0, PAGE_SIZE);
-		}
-	}
-
-	err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num,
-				  dma_list, shift, npages,
-				  0, size,
-				  MTHCA_MPT_FLAG_LOCAL_WRITE |
-				  MTHCA_MPT_FLAG_LOCAL_READ,
-				  &cq->mr);
-	if (err)
-		goto err_free;
-
-	kfree(dma_list);
-
-	return 0;
-
-err_free:
-	mthca_free_cq_buf(dev, cq);
-
-err_out:
-	kfree(dma_list);
-
-	return err;
+	mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE,
+		       &cq->queue, cq->is_direct, &cq->mr);
 }
 
 int mthca_init_cq(struct mthca_dev *dev, int nent,
@@ -797,7 +692,9 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	cq_context = mailbox->buf;
 
 	if (cq->is_kernel) {
-		err = mthca_alloc_cq_buf(dev, size, cq);
+		err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE,
+				      &cq->queue, &cq->is_direct,
+				      &dev->driver_pd, 1, &cq->mr);
 		if (err)
 			goto err_out_mailbox;
 
@@ -858,10 +755,8 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 	return 0;
 
 err_out_free_mr:
-	if (cq->is_kernel) {
-		mthca_free_mr(dev, &cq->mr);
+	if (cq->is_kernel)
 		mthca_free_cq_buf(dev, cq);
-	}
 
 err_out_mailbox:
 	mthca_free_mailbox(dev, mailbox);
@@ -929,7 +824,6 @@ void mthca_free_cq(struct mthca_dev *dev,
 	wait_event(cq->wait, !atomic_read(&cq->refcount));
 
 	if (cq->is_kernel) {
-		mthca_free_mr(dev, &cq->mr);
 		mthca_free_cq_buf(dev, cq);
 		if (mthca_is_memfree(dev)) {
 			mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 0f90a173ecee..cb78b5d07201 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -361,6 +361,11 @@ int mthca_array_set(struct mthca_array *array, int index, void *value);
 void mthca_array_clear(struct mthca_array *array, int index);
 int mthca_array_init(struct mthca_array *array, int nent);
 void mthca_array_cleanup(struct mthca_array *array, int nent);
+int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct,
+		    union mthca_buf *buf, int *is_direct, struct mthca_pd *pd,
+		    int hca_write, struct mthca_mr *mr);
+void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf,
+		    int is_direct, struct mthca_mr *mr);
 
 int mthca_init_uar_table(struct mthca_dev *dev);
 int mthca_init_pd_table(struct mthca_dev *dev);
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 624651edf577..b95249ee46cf 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -51,6 +51,11 @@ struct mthca_buf_list {
 	DECLARE_PCI_UNMAP_ADDR(mapping)
 };
 
+union mthca_buf {
+	struct mthca_buf_list direct;
+	struct mthca_buf_list *page_list;
+};
+
 struct mthca_uar {
 	unsigned long pfn;
 	int index;
@@ -187,10 +192,7 @@ struct mthca_cq {
 	__be32 *arm_db;
 	int arm_sn;
 
-	union {
-		struct mthca_buf_list direct;
-		struct mthca_buf_list *page_list;
-	} queue;
+	union mthca_buf queue;
 	struct mthca_mr mr;
 	wait_queue_head_t wait;
 };
@@ -228,10 +230,7 @@ struct mthca_qp {
 	int send_wqe_offset;
 
 	u64 *wrid;
-	union {
-		struct mthca_buf_list direct;
-		struct mthca_buf_list *page_list;
-	} queue;
+	union mthca_buf queue;
 
 	wait_queue_head_t wait;
 };
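For reference, the reason a plain union suffices for both layouts:
consumers translate a byte offset into a kernel virtual address
differently depending on is_direct, following the pattern of the
driver's CQE/WQE lookup helpers. A minimal sketch; example_buf_offset
is a hypothetical name, not part of the patch:

static void *example_buf_offset(union mthca_buf *buf, int is_direct,
				int offset)
{
	/* Direct layout: one contiguous buffer, plain pointer math. */
	if (is_direct)
		return buf->direct.buf + offset;

	/* Paged layout: split the offset into a page index and an
	 * offset within that page. */
	return buf->page_list[offset >> PAGE_SHIFT].buf +
		(offset & (PAGE_SIZE - 1));
}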
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index b7e3d2342799..b5a0bef15b7e 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -926,10 +926,6 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 			       struct mthca_qp *qp)
 {
 	int size;
-	int i;
-	int npages, shift;
-	dma_addr_t t;
-	u64 *dma_list = NULL;
 	int err = -ENOMEM;
 
 	size = sizeof (struct mthca_next_seg) +
@@ -979,116 +975,24 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 	if (!qp->wrid)
 		goto err_out;
 
-	if (size <= MTHCA_MAX_DIRECT_QP_SIZE) {
-		qp->is_direct = 1;
-		npages = 1;
-		shift = get_order(size) + PAGE_SHIFT;
-
-		if (0)
-			mthca_dbg(dev, "Creating direct QP of size %d (shift %d)\n",
-				  size, shift);
-
-		qp->queue.direct.buf = dma_alloc_coherent(&dev->pdev->dev, size,
-							  &t, GFP_KERNEL);
-		if (!qp->queue.direct.buf)
-			goto err_out;
-
-		pci_unmap_addr_set(&qp->queue.direct, mapping, t);
-
-		memset(qp->queue.direct.buf, 0, size);
-
-		while (t & ((1 << shift) - 1)) {
-			--shift;
-			npages *= 2;
-		}
-
-		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-		if (!dma_list)
-			goto err_out_free;
-
-		for (i = 0; i < npages; ++i)
-			dma_list[i] = t + i * (1 << shift);
-	} else {
-		qp->is_direct = 0;
-		npages = size / PAGE_SIZE;
-		shift = PAGE_SHIFT;
-
-		if (0)
-			mthca_dbg(dev, "Creating indirect QP with %d pages\n", npages);
-
-		dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL);
-		if (!dma_list)
-			goto err_out;
-
-		qp->queue.page_list = kmalloc(npages *
-					      sizeof *qp->queue.page_list,
-					      GFP_KERNEL);
-		if (!qp->queue.page_list)
-			goto err_out;
-
-		for (i = 0; i < npages; ++i) {
-			qp->queue.page_list[i].buf =
-				dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE,
-						   &t, GFP_KERNEL);
-			if (!qp->queue.page_list[i].buf)
-				goto err_out_free;
-
-			memset(qp->queue.page_list[i].buf, 0, PAGE_SIZE);
-
-			pci_unmap_addr_set(&qp->queue.page_list[i], mapping, t);
-			dma_list[i] = t;
-		}
-	}
-
-	err = mthca_mr_alloc_phys(dev, pd->pd_num, dma_list, shift,
-				  npages, 0, size,
-				  MTHCA_MPT_FLAG_LOCAL_READ,
-				  &qp->mr);
+	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
+			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
 	if (err)
-		goto err_out_free;
+		goto err_out;
 
-	kfree(dma_list);
 	return 0;
 
- err_out_free:
-	if (qp->is_direct) {
-		dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
-				  pci_unmap_addr(&qp->queue.direct, mapping));
-	} else
-		for (i = 0; i < npages; ++i) {
-			if (qp->queue.page_list[i].buf)
-				dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-						  qp->queue.page_list[i].buf,
-						  pci_unmap_addr(&qp->queue.page_list[i],
-								 mapping));
-
-		}
-
- err_out:
+err_out:
 	kfree(qp->wrid);
-	kfree(dma_list);
 	return err;
 }
 
 static void mthca_free_wqe_buf(struct mthca_dev *dev,
 			       struct mthca_qp *qp)
 {
-	int i;
-	int size = PAGE_ALIGN(qp->send_wqe_offset +
-			      (qp->sq.max << qp->sq.wqe_shift));
-
-	if (qp->is_direct) {
-		dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
-				  pci_unmap_addr(&qp->queue.direct, mapping));
-	} else {
-		for (i = 0; i < size / PAGE_SIZE; ++i) {
-			dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
-					  qp->queue.page_list[i].buf,
-					  pci_unmap_addr(&qp->queue.page_list[i],
-							 mapping));
-		}
-	}
-
+	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
+				       (qp->sq.max << qp->sq.wqe_shift)),
+		       &qp->queue, qp->is_direct, &qp->mr);
 	kfree(qp->wrid);
 }
 
@@ -1433,7 +1337,6 @@ void mthca_free_qp(struct mthca_dev *dev,
 		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
 			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);
 
-		mthca_free_mr(dev, &qp->mr);
 		mthca_free_memfree(dev, qp);
 		mthca_free_wqe_buf(dev, qp);
 	}