| field | value | date |
|---|---|---|
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-07-08 15:20:54 -0400 |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-07-08 15:20:54 -0400 |
| commit | e467e104bb7482170b79f516d2025e7cfcaaa733 | |
| tree | d9de9b008b2cec2e5f46e7bbc83cef50d3d5d288 /drivers/infiniband/hw/cxgb4/cq.c | |
| parent | b9f399594d12e353dcb609c25219bdaa76c2a050 | |
| parent | 9e770044a0f08a6dcf245152ec1575f7cb0b9631 | |
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
IPoIB: Fix world-writable child interface control sysfs attributes
IB/qib: Clean up properly if qib_init() fails
IB/qib: Completion queue callback needs to be single threaded
IB/qib: Update 7322 serdes tables
IB/qib: Clear 6120 hardware error register
IB/qib: Clear eager buffer memory for each new process
IB/qib: Mask hardware error during link reset
IB/qib: Don't mark VL15 bufs as WC to avoid a rare 7322 chip problem
RDMA/cxgb4: Derive smac_idx from port viid
RDMA/cxgb4: Avoid false GTS CIDX_INC overflows
RDMA/cxgb4: Don't call abort_connection() for active connect failures
RDMA/cxgb4: Use the DMA state API instead of the pci equivalents
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/cq.c')
| -rw-r--r-- | drivers/infiniband/hw/cxgb4/cq.c | 31 |
1 file changed, 23 insertions, 8 deletions
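The diff below carries two of the cxgb4 commits from the shortlog into cq.c: "RDMA/cxgb4: Use the DMA state API instead of the pci equivalents" (the first three hunks) and "RDMA/cxgb4: Avoid false GTS CIDX_INC overflows" (the remaining hunks). As a reader's reference for the first, here is a minimal, self-contained sketch of the dma_unmap_addr pattern the hunks switch to; the struct and function names are illustrative, not taken from the driver:

```c
/*
 * Illustrative sketch of the DMA state API pattern (not driver code).
 * DEFINE_DMA_UNMAP_ADDR declares storage for the bus address that the
 * free/unmap path needs later; on configurations without
 * CONFIG_NEED_DMA_MAP_STATE the field can compile away entirely.
 */
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/gfp.h>

struct example_ring {                   /* hypothetical structure */
	void *queue;                    /* CPU address of the ring */
	size_t memsize;                 /* bytes allocated */
	dma_addr_t dma_addr;
	DEFINE_DMA_UNMAP_ADDR(mapping); /* saved DMA address */
};

static int example_alloc(struct device *dev, struct example_ring *r)
{
	r->queue = dma_alloc_coherent(dev, r->memsize, &r->dma_addr,
				      GFP_KERNEL);
	if (!r->queue)
		return -ENOMEM;
	/* stash the mapping for the free path, as create_cq() now does */
	dma_unmap_addr_set(r, mapping, r->dma_addr);
	return 0;
}

static void example_free(struct device *dev, struct example_ring *r)
{
	dma_free_coherent(dev, r->memsize, r->queue,
			  dma_unmap_addr(r, mapping));
}
```

The pci_unmap_addr macros were the PCI-only spelling of the same idea, which is why all three conversion hunks are mechanical one-line swaps.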
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 2447f5295482..fac5c6e68011 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -77,7 +77,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	kfree(cq->sw_queue);
 	dma_free_coherent(&(rdev->lldi.pdev->dev),
 			  cq->memsize, cq->queue,
-			  pci_unmap_addr(cq, mapping));
+			  dma_unmap_addr(cq, mapping));
 	c4iw_put_cqid(rdev, cq->cqid, uctx);
 	return ret;
 }
@@ -112,7 +112,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 		ret = -ENOMEM;
 		goto err3;
 	}
-	pci_unmap_addr_set(cq, mapping, cq->dma_addr);
+	dma_unmap_addr_set(cq, mapping, cq->dma_addr);
 	memset(cq->queue, 0, cq->memsize);
 
 	/* build fw_ri_res_wr */
@@ -179,7 +179,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
 	return 0;
 err4:
 	dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,
-			  pci_unmap_addr(cq, mapping));
+			  dma_unmap_addr(cq, mapping));
 err3:
 	kfree(cq->sw_queue);
 err2:
@@ -764,7 +764,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 	struct c4iw_create_cq_resp uresp;
 	struct c4iw_ucontext *ucontext = NULL;
 	int ret;
-	size_t memsize;
+	size_t memsize, hwentries;
 	struct c4iw_mm_entry *mm, *mm2;
 
 	PDBG("%s ib_dev %p entries %d\n", __func__, ibdev, entries);
@@ -788,14 +788,29 @@
 	 * entries must be multiple of 16 for HW.
 	 */
 	entries = roundup(entries, 16);
-	memsize = entries * sizeof *chp->cq.queue;
+
+	/*
+	 * Make actual HW queue 2x to avoid cdix_inc overflows.
+	 */
+	hwentries = entries * 2;
+
+	/*
+	 * Make HW queue at least 64 entries so GTS updates aren't too
+	 * frequent.
+	 */
+	if (hwentries < 64)
+		hwentries = 64;
+
+	memsize = hwentries * sizeof *chp->cq.queue;
 
 	/*
 	 * memsize must be a multiple of the page size if its a user cq.
 	 */
-	if (ucontext)
+	if (ucontext) {
 		memsize = roundup(memsize, PAGE_SIZE);
-	chp->cq.size = entries;
+		hwentries = memsize / sizeof *chp->cq.queue;
+	}
+	chp->cq.size = hwentries;
 	chp->cq.memsize = memsize;
 
 	ret = create_cq(&rhp->rdev, &chp->cq,
@@ -805,7 +820,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
 
 	chp->rhp = rhp;
 	chp->cq.size--;				/* status page */
-	chp->ibcq.cqe = chp->cq.size - 1;
+	chp->ibcq.cqe = entries - 2;
 	spin_lock_init(&chp->lock);
 	atomic_set(&chp->refcnt, 1);
 	init_waitqueue_head(&chp->wait);
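The sizing hunks reduce to a few lines of arithmetic. Below is a standalone userspace rendering of the same math, with an assumed 64-byte CQE and 4096-byte page size (both values, and the sample request of 40 entries, are assumptions for the example, not taken from this page):

```c
/*
 * Standalone sketch of the new sizing math in c4iw_create_cq().
 * CQE_SIZE and PAGE_SZ are assumed values for illustration.
 */
#include <stdio.h>
#include <stddef.h>

#define CQE_SIZE 64UL   /* assumed sizeof(*chp->cq.queue) */
#define PAGE_SZ  4096UL /* assumed PAGE_SIZE */

static size_t roundup_to(size_t x, size_t m)
{
	return ((x + m - 1) / m) * m; /* same effect as kernel roundup() */
}

int main(void)
{
	size_t entries = 40; /* caller's request */
	size_t hwentries, memsize;

	entries = roundup_to(entries, 16); /* HW wants multiples of 16 */
	hwentries = entries * 2;           /* 2x to absorb CIDX_INC bursts */
	if (hwentries < 64)                /* floor of 64 limits GTS traffic */
		hwentries = 64;
	memsize = hwentries * CQE_SIZE;

	/* user CQs only: round memsize to a page, then grow hwentries */
	memsize = roundup_to(memsize, PAGE_SZ);
	hwentries = memsize / CQE_SIZE;

	/* one ring slot is the status page; advertised cqe is entries - 2 */
	printf("entries=%zu hwentries=%zu memsize=%zu cqe=%zu\n",
	       entries, hwentries, memsize, entries - 2);
	return 0;
}
```

Compiled and run, this prints entries=48 hwentries=128 memsize=8192 cqe=46: the hardware ring is doubled (and page-rounded for a user CQ) to avoid the false GTS CIDX_INC overflows, while the cqe count advertised to the verbs layer stays tied to the caller's request rather than to the inflated ring.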
