Diffstat (limited to 'drivers/infiniband/hw/mthca')
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_cq.c       |  76
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_dev.h      |   6
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_main.c     |   2
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.c  | 141
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_memfree.h  |  14
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_pd.c       |  24
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.c | 330
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_provider.h |  16
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_qp.c       | 215
 -rw-r--r--  drivers/infiniband/hw/mthca/mthca_user.h     |  81
 10 files changed, 762 insertions(+), 143 deletions(-)
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index 766e9031ec45..b5aea7b869f6 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -742,6 +743,7 @@ err_out:
 }
 
 int mthca_init_cq(struct mthca_dev *dev, int nent,
+                  struct mthca_ucontext *ctx, u32 pdn,
                   struct mthca_cq *cq)
 {
         int size = nent * MTHCA_CQ_ENTRY_SIZE;
@@ -753,30 +755,33 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 
         might_sleep();
 
         cq->ibcq.cqe  = nent - 1;
+        cq->is_kernel = !ctx;
 
         cq->cqn = mthca_alloc(&dev->cq_table.alloc);
         if (cq->cqn == -1)
                 return -ENOMEM;
 
         if (mthca_is_memfree(dev)) {
-                cq->arm_sn = 1;
-
                 err = mthca_table_get(dev, dev->cq_table.table, cq->cqn);
                 if (err)
                         goto err_out;
 
-                err = -ENOMEM;
+                if (cq->is_kernel) {
+                        cq->arm_sn = 1;
+
+                        err = -ENOMEM;
 
                 cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI,
                                                      cq->cqn, &cq->set_ci_db);
                 if (cq->set_ci_db_index < 0)
                         goto err_out_icm;
 
                 cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM,
                                                   cq->cqn, &cq->arm_db);
                 if (cq->arm_db_index < 0)
                         goto err_out_ci;
+                }
         }
 
         mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
@@ -785,12 +790,14 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
 
         cq_context = mailbox->buf;
 
-        err = mthca_alloc_cq_buf(dev, size, cq);
-        if (err)
-                goto err_out_mailbox;
+        if (cq->is_kernel) {
+                err = mthca_alloc_cq_buf(dev, size, cq);
+                if (err)
+                        goto err_out_mailbox;
 
         for (i = 0; i < nent; ++i)
                 set_cqe_hw(get_cqe(cq, i));
+        }
 
         spin_lock_init(&cq->lock);
         atomic_set(&cq->refcount, 1);
@@ -801,11 +808,14 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
                                                   MTHCA_CQ_STATE_DISARMED |
                                                   MTHCA_CQ_FLAG_TR);
         cq_context->start           = cpu_to_be64(0);
-        cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24 |
-                                                  dev->driver_uar.index);
+        cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);
+        if (ctx)
+                cq_context->logsize_usrpage |= cpu_to_be32(ctx->uar.index);
+        else
+                cq_context->logsize_usrpage |= cpu_to_be32(dev->driver_uar.index);
         cq_context->error_eqn       = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn);
         cq_context->comp_eqn        = cpu_to_be32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn);
-        cq_context->pd              = cpu_to_be32(dev->driver_pd.pd_num);
+        cq_context->pd              = cpu_to_be32(pdn);
         cq_context->lkey            = cpu_to_be32(cq->mr.ibmr.lkey);
         cq_context->cqn             = cpu_to_be32(cq->cqn);
 
@@ -843,18 +853,20 @@ int mthca_init_cq(struct mthca_dev *dev, int nent,
         return 0;
 
 err_out_free_mr:
-        mthca_free_mr(dev, &cq->mr);
-        mthca_free_cq_buf(dev, cq);
+        if (cq->is_kernel) {
+                mthca_free_mr(dev, &cq->mr);
+                mthca_free_cq_buf(dev, cq);
+        }
 
 err_out_mailbox:
         mthca_free_mailbox(dev, mailbox);
 
 err_out_arm:
-        if (mthca_is_memfree(dev))
+        if (cq->is_kernel && mthca_is_memfree(dev))
                 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index);
 
 err_out_ci:
-        if (mthca_is_memfree(dev))
+        if (cq->is_kernel && mthca_is_memfree(dev))
                 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
 
 err_out_icm:
@@ -892,7 +904,8 @@ void mthca_free_cq(struct mthca_dev *dev,
                 int j;
 
                 printk(KERN_ERR "context for CQN %x (cons index %x, next sw %d)\n",
-                       cq->cqn, cq->cons_index, !!next_cqe_sw(cq));
+                       cq->cqn, cq->cons_index,
+                       cq->is_kernel ? !!next_cqe_sw(cq) : 0);
                 for (j = 0; j < 16; ++j)
                         printk(KERN_ERR "[%2x] %08x\n", j * 4, be32_to_cpu(ctx[j]));
         }
@@ -910,12 +923,13 @@ void mthca_free_cq(struct mthca_dev *dev,
         atomic_dec(&cq->refcount);
         wait_event(cq->wait, !atomic_read(&cq->refcount));
 
-        mthca_free_mr(dev, &cq->mr);
-        mthca_free_cq_buf(dev, cq);
-
+        if (cq->is_kernel) {
+                mthca_free_mr(dev, &cq->mr);
+                mthca_free_cq_buf(dev, cq);
         if (mthca_is_memfree(dev)) {
                 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM,    cq->arm_db_index);
                 mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index);
+                }
         }
 
         mthca_table_put(dev, dev->cq_table.table, cq->cqn);
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index 4127f09dc5ec..5ecdd2eeeb0f 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -378,7 +379,7 @@ void mthca_unregister_device(struct mthca_dev *dev);
 int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar);
 void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar);
 
-int mthca_pd_alloc(struct mthca_dev *dev, struct mthca_pd *pd);
+int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd);
 void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd);
 
 struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size);
@@ -413,6 +414,7 @@ int mthca_poll_cq(struct ib_cq *ibcq, int num_entries,
 int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
 int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify);
 int mthca_init_cq(struct mthca_dev *dev, int nent,
+                  struct mthca_ucontext *ctx, u32 pdn,
                   struct mthca_cq *cq);
 void mthca_free_cq(struct mthca_dev *dev,
                    struct mthca_cq *cq);
@@ -438,12 +440,14 @@ int mthca_alloc_qp(struct mthca_dev *dev,
                    struct mthca_cq *recv_cq,
                    enum ib_qp_type type,
                    enum ib_sig_type send_policy,
+                   struct ib_qp_cap *cap,
                    struct mthca_qp *qp);
 int mthca_alloc_sqp(struct mthca_dev *dev,
                     struct mthca_pd *pd,
                     struct mthca_cq *send_cq,
                     struct mthca_cq *recv_cq,
                     enum ib_sig_type send_policy,
+                    struct ib_qp_cap *cap,
                     int qpn,
                     int port,
                     struct mthca_sqp *sqp);
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 09519b604c08..2ef916859e17 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -665,7 +665,7 @@ static int __devinit mthca_setup_hca(struct mthca_dev *dev)
                 goto err_pd_table_free;
         }
 
-        err = mthca_pd_alloc(dev, &dev->driver_pd);
+        err = mthca_pd_alloc(dev, 1, &dev->driver_pd);
         if (err) {
                 mthca_err(dev, "Failed to create driver PD, "
                           "aborting.\n");
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
index 6d3b05dd9e3f..2a8646150355 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.c
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -47,6 +48,15 @@ enum {
         MTHCA_TABLE_CHUNK_SIZE = 1 << 18
 };
 
+struct mthca_user_db_table {
+        struct semaphore mutex;
+        struct {
+                u64                uvirt;
+                struct scatterlist mem;
+                int                refcount;
+        }                page[0];
+};
+
 void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
 {
         struct mthca_icm_chunk *chunk, *tmp;
@@ -344,13 +354,133 @@ void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
         kfree(table);
 }
 
-static u64 mthca_uarc_virt(struct mthca_dev *dev, int page)
+static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
 {
         return dev->uar_table.uarc_base +
-               dev->driver_uar.index * dev->uar_table.uarc_size +
+               uar->index * dev->uar_table.uarc_size +
                page * 4096;
 }
 
+int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
+                      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
+{
+        int ret = 0;
+        u8 status;
+        int i;
+
+        if (!mthca_is_memfree(dev))
+                return 0;
+
+        if (index < 0 || index > dev->uar_table.uarc_size / 8)
+                return -EINVAL;
+
+        down(&db_tab->mutex);
+
+        i = index / MTHCA_DB_REC_PER_PAGE;
+
+        if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE)       ||
+            (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
+            (uaddr & 4095)) {
+                ret = -EINVAL;
+                goto out;
+        }
+
+        if (db_tab->page[i].refcount) {
+                ++db_tab->page[i].refcount;
+                goto out;
+        }
+
+        ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
+                             &db_tab->page[i].mem.page, NULL);
+        if (ret < 0)
+                goto out;
+
+        db_tab->page[i].mem.length = 4096;
+        db_tab->page[i].mem.offset = uaddr & ~PAGE_MASK;
+
+        ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+        if (ret < 0) {
+                put_page(db_tab->page[i].mem.page);
+                goto out;
+        }
+
+        ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
+                                 mthca_uarc_virt(dev, uar, i), &status);
+        if (!ret && status)
+                ret = -EINVAL;
+        if (ret) {
+                pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+                put_page(db_tab->page[i].mem.page);
+                goto out;
+        }
+
+        db_tab->page[i].uvirt    = uaddr;
+        db_tab->page[i].refcount = 1;
+
+out:
+        up(&db_tab->mutex);
+        return ret;
+}
+
+void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
+                         struct mthca_user_db_table *db_tab, int index)
+{
+        if (!mthca_is_memfree(dev))
+                return;
+
+        /*
+         * To make our bookkeeping simpler, we don't unmap DB
+         * pages until we clean up the whole db table.
+         */
+
+        down(&db_tab->mutex);
+
+        --db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;
+
+        up(&db_tab->mutex);
+}
+
+struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
+{
+        struct mthca_user_db_table *db_tab;
+        int npages;
+        int i;
+
+        if (!mthca_is_memfree(dev))
+                return NULL;
+
+        npages = dev->uar_table.uarc_size / 4096;
+        db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
+        if (!db_tab)
+                return ERR_PTR(-ENOMEM);
+
+        init_MUTEX(&db_tab->mutex);
+        for (i = 0; i < npages; ++i) {
+                db_tab->page[i].refcount = 0;
+                db_tab->page[i].uvirt    = 0;
+        }
+
+        return db_tab;
+}
+
+void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
+                               struct mthca_user_db_table *db_tab)
+{
+        int i;
+        u8 status;
+
+        if (!mthca_is_memfree(dev))
+                return;
+
+        for (i = 0; i < dev->uar_table.uarc_size / 4096; ++i) {
+                if (db_tab->page[i].uvirt) {
+                        mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
+                        pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
+                        put_page(db_tab->page[i].mem.page);
+                }
+        }
+}
+
 int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db)
 {
         int group;
@@ -407,7 +537,8 @@ int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db)
         }
         memset(page->db_rec, 0, 4096);
 
-        ret = mthca_MAP_ICM_page(dev, page->mapping, mthca_uarc_virt(dev, i), &status);
+        ret = mthca_MAP_ICM_page(dev, page->mapping,
+                                 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
         if (!ret && status)
                 ret = -EINVAL;
         if (ret) {
@@ -461,7 +592,7 @@ void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
 
         if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
             i >= dev->db_tab->max_group1 - 1) {
-                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, i), 1, &status);
+                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
 
                 dma_free_coherent(&dev->pdev->dev, 4096,
                                   page->db_rec, page->mapping);
@@ -530,7 +661,7 @@ void mthca_cleanup_db_tab(struct mthca_dev *dev)
                 if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
                         mthca_warn(dev, "Kernel UARC page %d not empty\n", i);
 
-                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, i), 1, &status);
+                mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);
 
                 dma_free_coherent(&dev->pdev->dev, 4096,
                                   dev->db_tab->page[i].db_rec,
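
The user doorbell table added above is the core of this change, and its refcounting is worth seeing in isolation: each 4 KB UARC page backs MTHCA_DB_REC_PER_PAGE doorbell records, mthca_map_user_db() pins the userspace page on first use and only bumps a per-page refcount afterwards, and mthca_unmap_user_db() merely drops the count, deferring the real unpinning to mthca_cleanup_user_db_tab(). The following is a minimal stand-alone C model of that scheme (hypothetical helper names, no kernel dependencies), a sketch rather than the driver code itself:

#include <stdio.h>

#define DB_REC_PER_PAGE 512     /* 4096-byte page / 8-byte doorbell record */

struct db_page {
        unsigned long uvirt;    /* user VA backing the page, 0 = unpinned */
        int refcount;           /* doorbell records mapped in this page */
};

static int map_db(struct db_page *pages, int npages, int index,
                  unsigned long uaddr)
{
        struct db_page *p;

        /* same shape as the -EINVAL checks in mthca_map_user_db() */
        if (index < 0 || index >= npages * DB_REC_PER_PAGE || (uaddr & 4095))
                return -1;

        p = &pages[index / DB_REC_PER_PAGE];
        if (p->uvirt && p->uvirt != uaddr)
                return -1;      /* a whole page must come from one user VA */

        if (p->refcount++ == 0)
                p->uvirt = uaddr;  /* first use: kernel pins + MAP_ICMs here */
        return 0;
}

static void unmap_db(struct db_page *pages, int index)
{
        /* only drop the count; the page stays pinned until table cleanup */
        --pages[index / DB_REC_PER_PAGE].refcount;
}

int main(void)
{
        struct db_page tab[4] = { { 0, 0 } };

        map_db(tab, 4, 0, 0x10000);     /* first record pins page 0 */
        map_db(tab, 4, 1, 0x10000);     /* second record only bumps the count */
        unmap_db(tab, 0);
        printf("page 0: refcount %d, still pinned at %#lx\n",
               tab[0].refcount, tab[0].uvirt);
        return 0;
}
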
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.h b/drivers/infiniband/hw/mthca/mthca_memfree.h
index fe7be2a6bc4a..4761d844cb5f 100644
--- a/drivers/infiniband/hw/mthca/mthca_memfree.h
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -148,7 +149,7 @@ struct mthca_db_table {
         struct semaphore mutex;
 };
 
-enum {
+enum mthca_db_type {
         MTHCA_DB_TYPE_INVALID   = 0x0,
         MTHCA_DB_TYPE_CQ_SET_CI = 0x1,
         MTHCA_DB_TYPE_CQ_ARM    = 0x2,
@@ -158,6 +159,17 @@ enum {
         MTHCA_DB_TYPE_GROUP_SEP = 0x7
 };
 
+struct mthca_user_db_table;
+struct mthca_uar;
+
+int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
+                      struct mthca_user_db_table *db_tab, int index, u64 uaddr);
+void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
+                         struct mthca_user_db_table *db_tab, int index);
+struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev);
+void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
+                               struct mthca_user_db_table *db_tab);
+
 int mthca_init_db_tab(struct mthca_dev *dev);
 void mthca_cleanup_db_tab(struct mthca_dev *dev);
 int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db);
diff --git a/drivers/infiniband/hw/mthca/mthca_pd.c b/drivers/infiniband/hw/mthca/mthca_pd.c
index ea66847e4ea3..c2c899844e98 100644
--- a/drivers/infiniband/hw/mthca/mthca_pd.c
+++ b/drivers/infiniband/hw/mthca/mthca_pd.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -37,23 +38,27 @@
 
 #include "mthca_dev.h"
 
-int mthca_pd_alloc(struct mthca_dev *dev, struct mthca_pd *pd)
+int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd)
 {
-        int err;
+        int err = 0;
 
         might_sleep();
 
+        pd->privileged = privileged;
+
         atomic_set(&pd->sqp_count, 0);
         pd->pd_num = mthca_alloc(&dev->pd_table.alloc);
         if (pd->pd_num == -1)
                 return -ENOMEM;
 
-        err = mthca_mr_alloc_notrans(dev, pd->pd_num,
-                                     MTHCA_MPT_FLAG_LOCAL_READ |
-                                     MTHCA_MPT_FLAG_LOCAL_WRITE,
-                                     &pd->ntmr);
-        if (err)
-                mthca_free(&dev->pd_table.alloc, pd->pd_num);
+        if (privileged) {
+                err = mthca_mr_alloc_notrans(dev, pd->pd_num,
+                                             MTHCA_MPT_FLAG_LOCAL_READ |
+                                             MTHCA_MPT_FLAG_LOCAL_WRITE,
+                                             &pd->ntmr);
+                if (err)
+                        mthca_free(&dev->pd_table.alloc, pd->pd_num);
+        }
 
         return err;
 }
@@ -61,7 +66,8 @@ int mthca_pd_alloc(struct mthca_dev *dev, struct mthca_pd *pd)
 void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd)
 {
         might_sleep();
-        mthca_free_mr(dev, &pd->ntmr);
+        if (pd->privileged)
+                mthca_free_mr(dev, &pd->ntmr);
         mthca_free(&dev->pd_table.alloc, pd->pd_num);
 }
 
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 0b5adfd91597..7a58ce90e179 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
  * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -34,9 +35,12 @@
  */
 
 #include <ib_smi.h>
+#include <linux/mm.h>
 
 #include "mthca_dev.h"
 #include "mthca_cmd.h"
+#include "mthca_user.h"
+#include "mthca_memfree.h"
 
 static int mthca_query_device(struct ib_device *ibdev,
                               struct ib_device_attr *props)
@@ -284,7 +288,78 @@ static int mthca_query_gid(struct ib_device *ibdev, u8 port,
         return err;
 }
 
-static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev)
+static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
+                                                struct ib_udata *udata)
+{
+        struct mthca_alloc_ucontext_resp uresp;
+        struct mthca_ucontext *context;
+        int err;
+
+        memset(&uresp, 0, sizeof uresp);
+
+        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
+        if (mthca_is_memfree(to_mdev(ibdev)))
+                uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size;
+        else
+                uresp.uarc_size = 0;
+
+        context = kmalloc(sizeof *context, GFP_KERNEL);
+        if (!context)
+                return ERR_PTR(-ENOMEM);
+
+        err = mthca_uar_alloc(to_mdev(ibdev), &context->uar);
+        if (err) {
+                kfree(context);
+                return ERR_PTR(err);
+        }
+
+        context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev));
+        if (IS_ERR(context->db_tab)) {
+                err = PTR_ERR(context->db_tab);
+                mthca_uar_free(to_mdev(ibdev), &context->uar);
+                kfree(context);
+                return ERR_PTR(err);
+        }
+
+        if (ib_copy_to_udata(udata, &uresp, sizeof uresp)) {
+                mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, context->db_tab);
+                mthca_uar_free(to_mdev(ibdev), &context->uar);
+                kfree(context);
+                return ERR_PTR(-EFAULT);
+        }
+
+        return &context->ibucontext;
+}
+
+static int mthca_dealloc_ucontext(struct ib_ucontext *context)
+{
+        mthca_cleanup_user_db_tab(to_mdev(context->device), &to_mucontext(context)->uar,
+                                  to_mucontext(context)->db_tab);
+        mthca_uar_free(to_mdev(context->device), &to_mucontext(context)->uar);
+        kfree(to_mucontext(context));
+
+        return 0;
+}
+
+static int mthca_mmap_uar(struct ib_ucontext *context,
+                          struct vm_area_struct *vma)
+{
+        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
+                return -EINVAL;
+
+        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+        if (remap_pfn_range(vma, vma->vm_start,
+                            to_mucontext(context)->uar.pfn,
+                            PAGE_SIZE, vma->vm_page_prot))
+                return -EAGAIN;
+
+        return 0;
+}
+
+static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev,
+                                    struct ib_ucontext *context,
+                                    struct ib_udata *udata)
 {
         struct mthca_pd *pd;
         int err;
@@ -293,12 +368,20 @@ static struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev)
         if (!pd)
                 return ERR_PTR(-ENOMEM);
 
-        err = mthca_pd_alloc(to_mdev(ibdev), pd);
+        err = mthca_pd_alloc(to_mdev(ibdev), !context, pd);
         if (err) {
                 kfree(pd);
                 return ERR_PTR(err);
         }
 
+        if (context) {
+                if (ib_copy_to_udata(udata, &pd->pd_num, sizeof (__u32))) {
+                        mthca_pd_free(to_mdev(ibdev), pd);
+                        kfree(pd);
+                        return ERR_PTR(-EFAULT);
+                }
+        }
+
         return &pd->ibpd;
 }
 
@@ -338,8 +421,10 @@ static int mthca_ah_destroy(struct ib_ah *ah)
 }
 
 static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
-                                     struct ib_qp_init_attr *init_attr)
+                                     struct ib_qp_init_attr *init_attr,
+                                     struct ib_udata *udata)
 {
+        struct mthca_create_qp ucmd;
         struct mthca_qp *qp;
         int err;
 
@@ -348,41 +433,82 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
         case IB_QPT_UC:
         case IB_QPT_UD:
         {
+                struct mthca_ucontext *context;
+
                 qp = kmalloc(sizeof *qp, GFP_KERNEL);
                 if (!qp)
                         return ERR_PTR(-ENOMEM);
 
-                qp->sq.max    = init_attr->cap.max_send_wr;
-                qp->rq.max    = init_attr->cap.max_recv_wr;
-                qp->sq.max_gs = init_attr->cap.max_send_sge;
-                qp->rq.max_gs = init_attr->cap.max_recv_sge;
+                if (pd->uobject) {
+                        context = to_mucontext(pd->uobject->context);
+
+                        if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+                                return ERR_PTR(-EFAULT);
+
+                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+                                                context->db_tab,
+                                                ucmd.sq_db_index, ucmd.sq_db_page);
+                        if (err) {
+                                kfree(qp);
+                                return ERR_PTR(err);
+                        }
+
+                        err = mthca_map_user_db(to_mdev(pd->device), &context->uar,
+                                                context->db_tab,
+                                                ucmd.rq_db_index, ucmd.rq_db_page);
+                        if (err) {
+                                mthca_unmap_user_db(to_mdev(pd->device),
+                                                    &context->uar,
+                                                    context->db_tab,
+                                                    ucmd.sq_db_index);
+                                kfree(qp);
+                                return ERR_PTR(err);
+                        }
+
+                        qp->mr.ibmr.lkey = ucmd.lkey;
+                        qp->sq.db_index  = ucmd.sq_db_index;
+                        qp->rq.db_index  = ucmd.rq_db_index;
+                }
 
                 err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd),
                                      to_mcq(init_attr->send_cq),
                                      to_mcq(init_attr->recv_cq),
                                      init_attr->qp_type, init_attr->sq_sig_type,
-                                     qp);
+                                     &init_attr->cap, qp);
+
+                if (err && pd->uobject) {
+                        context = to_mucontext(pd->uobject->context);
+
+                        mthca_unmap_user_db(to_mdev(pd->device),
+                                            &context->uar,
+                                            context->db_tab,
+                                            ucmd.sq_db_index);
+                        mthca_unmap_user_db(to_mdev(pd->device),
+                                            &context->uar,
+                                            context->db_tab,
+                                            ucmd.rq_db_index);
+                }
+
                 qp->ibqp.qp_num = qp->qpn;
                 break;
         }
         case IB_QPT_SMI:
         case IB_QPT_GSI:
         {
+                /* Don't allow userspace to create special QPs */
+                if (pd->uobject)
+                        return ERR_PTR(-EINVAL);
+
                 qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL);
                 if (!qp)
                         return ERR_PTR(-ENOMEM);
 
-                qp->sq.max    = init_attr->cap.max_send_wr;
-                qp->rq.max    = init_attr->cap.max_recv_wr;
-                qp->sq.max_gs = init_attr->cap.max_send_sge;
-                qp->rq.max_gs = init_attr->cap.max_recv_sge;
-
                 qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_SMI ? 0 : 1;
 
                 err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd),
                                       to_mcq(init_attr->send_cq),
                                       to_mcq(init_attr->recv_cq),
-                                      init_attr->sq_sig_type,
+                                      init_attr->sq_sig_type, &init_attr->cap,
                                       qp->ibqp.qp_num, init_attr->port_num,
                                       to_msqp(qp));
                 break;
@@ -397,42 +523,115 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
                 return ERR_PTR(err);
         }
 
         init_attr->cap.max_inline_data = 0;
+        init_attr->cap.max_send_wr     = qp->sq.max;
+        init_attr->cap.max_recv_wr     = qp->rq.max;
+        init_attr->cap.max_send_sge    = qp->sq.max_gs;
+        init_attr->cap.max_recv_sge    = qp->rq.max_gs;
 
         return &qp->ibqp;
 }
 
 static int mthca_destroy_qp(struct ib_qp *qp)
 {
+        if (qp->uobject) {
+                mthca_unmap_user_db(to_mdev(qp->device),
+                                    &to_mucontext(qp->uobject->context)->uar,
+                                    to_mucontext(qp->uobject->context)->db_tab,
+                                    to_mqp(qp)->sq.db_index);
+                mthca_unmap_user_db(to_mdev(qp->device),
+                                    &to_mucontext(qp->uobject->context)->uar,
+                                    to_mucontext(qp->uobject->context)->db_tab,
+                                    to_mqp(qp)->rq.db_index);
+        }
         mthca_free_qp(to_mdev(qp->device), to_mqp(qp));
         kfree(qp);
         return 0;
 }
 
-static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries)
+static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries,
+                                     struct ib_ucontext *context,
+                                     struct ib_udata *udata)
 {
+        struct mthca_create_cq ucmd;
         struct mthca_cq *cq;
         int nent;
         int err;
 
+        if (context) {
+                if (ib_copy_from_udata(&ucmd, udata, sizeof ucmd))
+                        return ERR_PTR(-EFAULT);
+
+                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
+                                        to_mucontext(context)->db_tab,
+                                        ucmd.set_db_index, ucmd.set_db_page);
+                if (err)
+                        return ERR_PTR(err);
+
+                err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
+                                        to_mucontext(context)->db_tab,
+                                        ucmd.arm_db_index, ucmd.arm_db_page);
+                if (err)
+                        goto err_unmap_set;
+        }
+
         cq = kmalloc(sizeof *cq, GFP_KERNEL);
-        if (!cq)
-                return ERR_PTR(-ENOMEM);
+        if (!cq) {
+                err = -ENOMEM;
+                goto err_unmap_arm;
+        }
+
+        if (context) {
+                cq->mr.ibmr.lkey    = ucmd.lkey;
+                cq->set_ci_db_index = ucmd.set_db_index;
+                cq->arm_db_index    = ucmd.arm_db_index;
+        }
 
         for (nent = 1; nent <= entries; nent <<= 1)
                 ; /* nothing */
 
-        err = mthca_init_cq(to_mdev(ibdev), nent, cq);
-        if (err) {
-                kfree(cq);
-                cq = ERR_PTR(err);
+        err = mthca_init_cq(to_mdev(ibdev), nent,
+                            context ? to_mucontext(context) : NULL,
+                            context ? ucmd.pdn : to_mdev(ibdev)->driver_pd.pd_num,
+                            cq);
+        if (err)
+                goto err_free;
+
+        if (context && ib_copy_to_udata(udata, &cq->cqn, sizeof (__u32))) {
+                mthca_free_cq(to_mdev(ibdev), cq);
+                goto err_free;
         }
 
         return &cq->ibcq;
+
+err_free:
+        kfree(cq);
+
+err_unmap_arm:
+        if (context)
+                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
+                                    to_mucontext(context)->db_tab, ucmd.arm_db_index);
+
+err_unmap_set:
+        if (context)
+                mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar,
+                                    to_mucontext(context)->db_tab, ucmd.set_db_index);
+
+        return ERR_PTR(err);
 }
 
 static int mthca_destroy_cq(struct ib_cq *cq)
 {
+        if (cq->uobject) {
+                mthca_unmap_user_db(to_mdev(cq->device),
+                                    &to_mucontext(cq->uobject->context)->uar,
+                                    to_mucontext(cq->uobject->context)->db_tab,
+                                    to_mcq(cq)->arm_db_index);
+                mthca_unmap_user_db(to_mdev(cq->device),
+                                    &to_mucontext(cq->uobject->context)->uar,
+                                    to_mucontext(cq->uobject->context)->db_tab,
+                                    to_mcq(cq)->set_ci_db_index);
+        }
         mthca_free_cq(to_mdev(cq->device), to_mcq(cq));
         kfree(cq);
 
@@ -568,6 +767,87 @@ static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd,
         return &mr->ibmr;
 }
 
+static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, struct ib_umem *region,
+                                       int acc, struct ib_udata *udata)
+{
+        struct mthca_dev *dev = to_mdev(pd->device);
+        struct ib_umem_chunk *chunk;
+        struct mthca_mr *mr;
+        u64 *pages;
+        int shift, n, len;
+        int i, j, k;
+        int err = 0;
+
+        shift = ffs(region->page_size) - 1;
+
+        mr = kmalloc(sizeof *mr, GFP_KERNEL);
+        if (!mr)
+                return ERR_PTR(-ENOMEM);
+
+        n = 0;
+        list_for_each_entry(chunk, &region->chunk_list, list)
+                n += chunk->nents;
+
+        mr->mtt = mthca_alloc_mtt(dev, n);
+        if (IS_ERR(mr->mtt)) {
+                err = PTR_ERR(mr->mtt);
+                goto err;
+        }
+
+        pages = (u64 *) __get_free_page(GFP_KERNEL);
+        if (!pages) {
+                err = -ENOMEM;
+                goto err_mtt;
+        }
+
+        i = n = 0;
+
+        list_for_each_entry(chunk, &region->chunk_list, list)
+                for (j = 0; j < chunk->nmap; ++j) {
+                        len = sg_dma_len(&chunk->page_list[j]) >> shift;
+                        for (k = 0; k < len; ++k) {
+                                pages[i++] = sg_dma_address(&chunk->page_list[j]) +
+                                        region->page_size * k;
+                                /*
+                                 * Be friendly to WRITE_MTT command
+                                 * and leave two empty slots for the
+                                 * index and reserved fields of the
+                                 * mailbox.
+                                 */
+                                if (i == PAGE_SIZE / sizeof (u64) - 2) {
+                                        err = mthca_write_mtt(dev, mr->mtt,
+                                                              n, pages, i);
+                                        if (err)
+                                                goto mtt_done;
+                                        n += i;
+                                        i = 0;
+                                }
+                        }
+                }
+
+        if (i)
+                err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
+mtt_done:
+        free_page((unsigned long) pages);
+        if (err)
+                goto err_mtt;
+
+        err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, shift, region->virt_base,
+                             region->length, convert_access(acc), mr);
+
+        if (err)
+                goto err_mtt;
+
+        return &mr->ibmr;
+
+err_mtt:
+        mthca_free_mtt(dev, mr->mtt);
+
+err:
+        kfree(mr);
+        return ERR_PTR(err);
+}
+
 static int mthca_dereg_mr(struct ib_mr *mr)
 {
         struct mthca_mr *mmr = to_mmr(mr);
@@ -692,6 +972,8 @@ int mthca_register_device(struct mthca_dev *dev)
         int i;
 
         strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX);
+        dev->ib_dev.owner                = THIS_MODULE;
+
         dev->ib_dev.node_type            = IB_NODE_CA;
         dev->ib_dev.phys_port_cnt        = dev->limits.num_ports;
         dev->ib_dev.dma_device           = &dev->pdev->dev;
@@ -701,6 +983,9 @@ int mthca_register_device(struct mthca_dev *dev)
         dev->ib_dev.modify_port          = mthca_modify_port;
         dev->ib_dev.query_pkey           = mthca_query_pkey;
         dev->ib_dev.query_gid            = mthca_query_gid;
+        dev->ib_dev.alloc_ucontext       = mthca_alloc_ucontext;
+        dev->ib_dev.dealloc_ucontext     = mthca_dealloc_ucontext;
+        dev->ib_dev.mmap                 = mthca_mmap_uar;
         dev->ib_dev.alloc_pd             = mthca_alloc_pd;
         dev->ib_dev.dealloc_pd           = mthca_dealloc_pd;
         dev->ib_dev.create_ah            = mthca_ah_create;
@@ -713,6 +998,7 @@ int mthca_register_device(struct mthca_dev *dev)
         dev->ib_dev.poll_cq              = mthca_poll_cq;
         dev->ib_dev.get_dma_mr           = mthca_get_dma_mr;
         dev->ib_dev.reg_phys_mr          = mthca_reg_phys_mr;
+        dev->ib_dev.reg_user_mr          = mthca_reg_user_mr;
         dev->ib_dev.dereg_mr             = mthca_dereg_mr;
 
         if (dev->mthca_flags & MTHCA_FLAG_FMR) {
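
mthca_mmap_uar() above is what backs a process's doorbell page: userspace mmap()s one page of its uverbs file descriptor and the kernel remaps the context's UAR into it, uncached. A hypothetical userspace-side sketch follows, assuming the standard uverbs device node and a zero offset; real code performs this through libibverbs/libmthca during context creation rather than by hand:

#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long page_size = sysconf(_SC_PAGESIZE);
        int fd = open("/dev/infiniband/uverbs0", O_RDWR);
        if (fd < 0)
                return 1;

        /* mthca_mmap_uar() rejects anything but a single-page mapping
         * and marks it noncached, since doorbells are MMIO writes */
        void *uar = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd, 0);
        if (uar == MAP_FAILED) {
                close(fd);
                return 1;
        }

        printf("UAR mapped at %p\n", uar);
        munmap(uar, page_size);
        close(fd);
        return 0;
}
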
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index 4d976cccb1a8..1d032791cc8b 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -54,6 +55,14 @@ struct mthca_uar {
         int           index;
 };
 
+struct mthca_user_db_table;
+
+struct mthca_ucontext {
+        struct ib_ucontext          ibucontext;
+        struct mthca_uar            uar;
+        struct mthca_user_db_table *db_tab;
+};
+
 struct mthca_mtt;
 
 struct mthca_mr {
@@ -83,6 +92,7 @@ struct mthca_pd {
         u32             pd_num;
         atomic_t        sqp_count;
         struct mthca_mr ntmr;
+        int             privileged;
 };
 
 struct mthca_eq {
@@ -167,6 +177,7 @@ struct mthca_cq {
         int                    cqn;
         u32                    cons_index;
         int                    is_direct;
+        int                    is_kernel;
 
         /* Next fields are Arbel only */
         int                    set_ci_db_index;
@@ -236,6 +247,11 @@ struct mthca_sqp {
         dma_addr_t      header_dma;
 };
 
+static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
+{
+        return container_of(ibucontext, struct mthca_ucontext, ibucontext);
+}
+
 static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibmr)
 {
         return container_of(ibmr, struct mthca_fmr, ibmr);
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 163a8ef4186f..f7126b14d5ae 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2004 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -46,7 +47,9 @@ enum {
         MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
         MTHCA_ACK_REQ_FREQ       = 10,
         MTHCA_FLIGHT_LIMIT       = 9,
-        MTHCA_UD_HEADER_SIZE     = 72  /* largest UD header possible */
+        MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
+        MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
+        MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
 };
 
 enum {
@@ -689,7 +692,11 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
 
         /* leave arbel_sched_queue as 0 */
 
-        qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
+        if (qp->ibqp.uobject)
+                qp_context->usr_page =
+                        cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
+        else
+                qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
         qp_context->local_qpn  = cpu_to_be32(qp->qpn);
         if (attr_mask & IB_QP_DEST_QPN) {
                 qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
@@ -954,6 +961,15 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
 
         qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
                                     1 << qp->sq.wqe_shift);
+
+        /*
+         * If this is a userspace QP, we don't actually have to
+         * allocate anything.  All we need is to calculate the WQE
+         * sizes and the send_wqe_offset, so we're done now.
+         */
+        if (pd->ibpd.uobject)
+                return 0;
+
         size = PAGE_ALIGN(qp->send_wqe_offset +
                           (qp->sq.max << qp->sq.wqe_shift));
 
@@ -1053,10 +1069,32 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
         return err;
 }
 
-static int mthca_alloc_memfree(struct mthca_dev *dev,
+static void mthca_free_wqe_buf(struct mthca_dev *dev,
                                struct mthca_qp *qp)
 {
-        int ret = 0;
+        int i;
+        int size = PAGE_ALIGN(qp->send_wqe_offset +
+                              (qp->sq.max << qp->sq.wqe_shift));
+
+        if (qp->is_direct) {
+                dma_free_coherent(&dev->pdev->dev, size, qp->queue.direct.buf,
+                                  pci_unmap_addr(&qp->queue.direct, mapping));
+        } else {
+                for (i = 0; i < size / PAGE_SIZE; ++i) {
+                        dma_free_coherent(&dev->pdev->dev, PAGE_SIZE,
+                                          qp->queue.page_list[i].buf,
+                                          pci_unmap_addr(&qp->queue.page_list[i],
+                                                         mapping));
+                }
+        }
+
+        kfree(qp->wrid);
+}
+
+static int mthca_map_memfree(struct mthca_dev *dev,
+                             struct mthca_qp *qp)
+{
+        int ret;
 
         if (mthca_is_memfree(dev)) {
                 ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
@@ -1067,35 +1105,15 @@ static int mthca_alloc_memfree(struct mthca_dev *dev,
                 if (ret)
                         goto err_qpc;
 
                 ret = mthca_table_get(dev, dev->qp_table.rdb_table,
                                       qp->qpn << dev->qp_table.rdb_shift);
                 if (ret)
                         goto err_eqpc;
-
-                qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
-                                                 qp->qpn, &qp->rq.db);
-                if (qp->rq.db_index < 0) {
-                        ret = -ENOMEM;
-                        goto err_rdb;
-                }
 
-                qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
-                                                 qp->qpn, &qp->sq.db);
-                if (qp->sq.db_index < 0) {
-                        ret = -ENOMEM;
-                        goto err_rq_db;
-                }
         }
 
         return 0;
 
-err_rq_db:
-        mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
-
-err_rdb:
-        mthca_table_put(dev, dev->qp_table.rdb_table,
-                        qp->qpn << dev->qp_table.rdb_shift);
-
 err_eqpc:
         mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
 
@@ -1105,6 +1123,35 @@ err_qpc:
         return ret;
 }
 
+static void mthca_unmap_memfree(struct mthca_dev *dev,
+                                struct mthca_qp *qp)
+{
+        mthca_table_put(dev, dev->qp_table.rdb_table,
+                        qp->qpn << dev->qp_table.rdb_shift);
+        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
+        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
+}
+
+static int mthca_alloc_memfree(struct mthca_dev *dev,
+                               struct mthca_qp *qp)
+{
+        int ret = 0;
+
+        if (mthca_is_memfree(dev)) {
+                qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
+                                                 qp->qpn, &qp->rq.db);
+                if (qp->rq.db_index < 0)
+                        return ret;
+
+                qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
+                                                 qp->qpn, &qp->sq.db);
+                if (qp->sq.db_index < 0)
+                        mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
+        }
+
+        return ret;
+}
+
 static void mthca_free_memfree(struct mthca_dev *dev,
                                struct mthca_qp *qp)
 {
@@ -1112,11 +1159,6 @@ static void mthca_free_memfree(struct mthca_dev *dev,
                 mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
                 mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
         }
-
-        mthca_table_put(dev, dev->qp_table.rdb_table,
-                        qp->qpn << dev->qp_table.rdb_shift);
-        mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
-        mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
 }
 
 static void mthca_wq_init(struct mthca_wq* wq)
@@ -1147,13 +1189,28 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
         mthca_wq_init(&qp->sq);
         mthca_wq_init(&qp->rq);
 
-        ret = mthca_alloc_memfree(dev, qp);
+        ret = mthca_map_memfree(dev, qp);
         if (ret)
                 return ret;
 
         ret = mthca_alloc_wqe_buf(dev, pd, qp);
         if (ret) {
-                mthca_free_memfree(dev, qp);
+                mthca_unmap_memfree(dev, qp);
+                return ret;
+        }
+
+        /*
+         * If this is a userspace QP, we're done now.  The doorbells
+         * will be allocated and buffers will be initialized in
+         * userspace.
+         */
+        if (pd->ibpd.uobject)
+                return 0;
+
+        ret = mthca_alloc_memfree(dev, qp);
+        if (ret) {
+                mthca_free_wqe_buf(dev, qp);
+                mthca_unmap_memfree(dev, qp);
                 return ret;
         }
 
@@ -1186,22 +1243,39 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
         return 0;
 }
 
-static void mthca_align_qp_size(struct mthca_dev *dev, struct mthca_qp *qp)
+static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
+                             struct mthca_qp *qp)
 {
-        int i;
-
-        if (!mthca_is_memfree(dev))
-                return;
+        /* Sanity check QP size before proceeding */
+        if (cap->max_send_wr > 65536 || cap->max_recv_wr > 65536 ||
+            cap->max_send_sge > 64 || cap->max_recv_sge > 64)
+                return -EINVAL;
 
-        for (i = 0; 1 << i < qp->rq.max; ++i)
-                ; /* nothing */
+        if (mthca_is_memfree(dev)) {
+                qp->rq.max = cap->max_recv_wr ?
+                        roundup_pow_of_two(cap->max_recv_wr) : 0;
+                qp->sq.max = cap->max_send_wr ?
+                        roundup_pow_of_two(cap->max_send_wr) : 0;
+        } else {
+                qp->rq.max = cap->max_recv_wr;
+                qp->sq.max = cap->max_send_wr;
+        }
 
-        qp->rq.max = 1 << i;
+        qp->rq.max_gs = cap->max_recv_sge;
+        qp->sq.max_gs = max_t(int, cap->max_send_sge,
+                              ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
+                                    MTHCA_INLINE_CHUNK_SIZE) /
+                              sizeof (struct mthca_data_seg));
 
-        for (i = 0; 1 << i < qp->sq.max; ++i)
-                ; /* nothing */
+        /*
+         * For MLX transport we need 2 extra S/G entries:
+         * one for the header and one for the checksum at the end
+         */
+        if ((qp->transport == MLX && qp->sq.max_gs + 2 > dev->limits.max_sg) ||
+            qp->sq.max_gs > dev->limits.max_sg || qp->rq.max_gs > dev->limits.max_sg)
+                return -EINVAL;
 
-        qp->sq.max = 1 << i;
+        return 0;
 }
 
 int mthca_alloc_qp(struct mthca_dev *dev,
@@ -1210,11 +1284,14 @@ int mthca_alloc_qp(struct mthca_dev *dev,
                    struct mthca_cq *recv_cq,
                    enum ib_qp_type type,
                    enum ib_sig_type send_policy,
+                   struct ib_qp_cap *cap,
                    struct mthca_qp *qp)
 {
         int err;
 
-        mthca_align_qp_size(dev, qp);
+        err = mthca_set_qp_size(dev, cap, qp);
+        if (err)
+                return err;
 
         switch (type) {
         case IB_QPT_RC: qp->transport = RC; break;
@@ -1247,14 +1324,17 @@ int mthca_alloc_sqp(struct mthca_dev *dev,
                     struct mthca_cq *send_cq,
                     struct mthca_cq *recv_cq,
                     enum ib_sig_type send_policy,
+                    struct ib_qp_cap *cap,
                     int qpn,
                     int port,
                     struct mthca_sqp *sqp)
 {
-        int err = 0;
         u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
+        int err;
 
-        mthca_align_qp_size(dev, &sqp->qp);
+        err = mthca_set_qp_size(dev, cap, &sqp->qp);
+        if (err)
+                return err;
 
         sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
         sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
@@ -1313,8 +1393,6 @@ void mthca_free_qp(struct mthca_dev *dev,
                   struct mthca_qp *qp)
 {
         u8 status;
-        int size;
-        int i;
         struct mthca_cq *send_cq;
         struct mthca_cq *recv_cq;
 
@@ -1344,31 +1422,22 @@ void mthca_free_qp(struct mthca_dev *dev,
         if (qp->state != IB_QPS_RESET)
                 mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);
 
-        mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
-        if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
-                mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);
-
-        mthca_free_mr(dev, &qp->mr);
-
-        size = PAGE_ALIGN(qp->send_wqe_offset +
-                          (qp->sq.max << qp->sq.wqe_shift));
+        /*
+         * If this is a userspace QP, the buffers, MR, CQs and so on
+         * will be cleaned up in userspace, so all we have to do is
+         * unref the mem-free tables and free the QPN in our table.
+         */
+        if (!qp->ibqp.uobject) {
+                mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn);
+                if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
+                        mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn);
 
-        if (qp->is_direct) {
-                pci_free_consistent(dev->pdev, size,
-                                    qp->queue.direct.buf,
-                                    pci_unmap_addr(&qp->queue.direct, mapping));
-        } else {
-                for (i = 0; i < size / PAGE_SIZE; ++i) {
-                        pci_free_consistent(dev->pdev, PAGE_SIZE,
-                                            qp->queue.page_list[i].buf,
-                                            pci_unmap_addr(&qp->queue.page_list[i],
-                                                           mapping));
-                }
+                mthca_free_mr(dev, &qp->mr);
+                mthca_free_memfree(dev, qp);
+                mthca_free_wqe_buf(dev, qp);
         }
 
-        kfree(qp->wrid);
-
-        mthca_free_memfree(dev, qp);
+        mthca_unmap_memfree(dev, qp);
 
         if (is_sqp(dev, qp)) {
                 atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
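
The sizing rules in mthca_set_qp_size() above are easy to check by hand: mem-free HCAs round queue depths up to powers of two, and inline data travels in MTHCA_INLINE_CHUNK_SIZE (16-byte) chunks after a MTHCA_INLINE_HEADER_SIZE (4-byte) header, so 64 bytes of inline data need ALIGN(64 + 4, 16) / 16 = 5 gather slots. A stand-alone sketch of that arithmetic, with roundup_pow_of_two() reimplemented for illustration and sizeof (struct mthca_data_seg) taken as its actual 16 bytes:

#include <stdio.h>

#define MTHCA_INLINE_HEADER_SIZE 4
#define MTHCA_INLINE_CHUNK_SIZE  16
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

static unsigned int roundup_pow_of_two(unsigned int n)
{
        unsigned int r = 1;

        while (r < n)
                r <<= 1;
        return r;
}

int main(void)
{
        unsigned int max_send_wr = 100, max_send_sge = 2, max_inline_data = 64;
        unsigned int data_seg_size = 16;        /* sizeof (struct mthca_data_seg) */

        /* mem-free HCAs need power-of-two work queue depths */
        unsigned int sq_max = roundup_pow_of_two(max_send_wr);

        /* 64 inline bytes + 4-byte header, in 16-byte chunks: 5 slots */
        unsigned int inline_gs = ALIGN(max_inline_data + MTHCA_INLINE_HEADER_SIZE,
                                       MTHCA_INLINE_CHUNK_SIZE) / data_seg_size;
        unsigned int sq_max_gs = max_send_sge > inline_gs ? max_send_sge : inline_gs;

        printf("sq.max = %u, sq.max_gs = %u\n", sq_max, sq_max_gs);  /* 128, 5 */
        return 0;
}
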
diff --git a/drivers/infiniband/hw/mthca/mthca_user.h b/drivers/infiniband/hw/mthca/mthca_user.h
new file mode 100644
index 000000000000..3024c1b4547d
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_user.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Cisco Systems.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef MTHCA_USER_H
+#define MTHCA_USER_H
+
+#include <linux/types.h>
+
+/*
+ * Make sure that all structs defined in this file remain laid out so
+ * that they pack the same way on 32-bit and 64-bit architectures (to
+ * avoid incompatibility between 32-bit userspace and 64-bit kernels).
+ * In particular do not use pointer types -- pass pointers in __u64
+ * instead.
+ */
+
+struct mthca_alloc_ucontext_resp {
+        __u32 qp_tab_size;
+        __u32 uarc_size;
+};
+
+struct mthca_alloc_pd_resp {
+        __u32 pdn;
+        __u32 reserved;
+};
+
+struct mthca_create_cq {
+        __u32 lkey;
+        __u32 pdn;
+        __u64 arm_db_page;
+        __u64 set_db_page;
+        __u32 arm_db_index;
+        __u32 set_db_index;
+};
+
+struct mthca_create_cq_resp {
+        __u32 cqn;
+        __u32 reserved;
+};
+
+struct mthca_create_qp {
+        __u32 lkey;
+        __u32 reserved;
+        __u64 sq_db_page;
+        __u64 rq_db_page;
+        __u32 sq_db_index;
+        __u32 rq_db_index;
+};
+
+#endif /* MTHCA_USER_H */
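
To see how the ABI above is meant to travel, here is a hypothetical userspace-side sketch of filling the create-CQ payload. Everything around it is assumed and not shown: the CQE buffer must already be registered (producing the lkey), the PD number comes back in the alloc-PD response, and the doorbell record page is ordinary user memory whose address the kernel pins via mthca_map_user_db(). Note the layout rule from the comment above: addresses cross the boundary as __u64-sized integers, never as pointers.

/* Local copy of struct mthca_create_cq from mthca_user.h, using
 * <stdint.h> types; lkey, pdn, and doorbell values are placeholders
 * produced elsewhere (MR registration, the alloc-PD response, and a
 * userspace doorbell record allocator). */
#include <stdint.h>
#include <string.h>

struct mthca_create_cq {
        uint32_t lkey;
        uint32_t pdn;
        uint64_t arm_db_page;
        uint64_t set_db_page;
        uint32_t arm_db_index;
        uint32_t set_db_index;
};

static void fill_create_cq(struct mthca_create_cq *cmd,
                           uint32_t cq_buf_lkey, uint32_t pdn,
                           void *db_page, uint32_t arm_index, uint32_t set_index)
{
        memset(cmd, 0, sizeof *cmd);
        cmd->lkey         = cq_buf_lkey;        /* MR covering the CQE buffer */
        cmd->pdn          = pdn;                /* from the alloc-PD response */
        /* pass the doorbell page's address as a __u64-sized integer,
         * never as a pointer; the kernel pins it in mthca_map_user_db() */
        cmd->arm_db_page  = (uintptr_t) db_page;
        cmd->set_db_page  = (uintptr_t) db_page;
        cmd->arm_db_index = arm_index;
        cmd->set_db_index = set_index;
}
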