author     Linus Torvalds <torvalds@linux-foundation.org>   2012-05-21 20:54:55 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-05-21 20:54:55 -0400
commit     c23ddf7857bdb2e8001b0a058603497c765a580d (patch)
tree       f1d826612114a17d6ab543b7095adf04b5ba614a /drivers/infiniband/hw/cxgb4/mem.c
parent     da4f58ffa08a7b7012fab9c205fa0f6ba40fec42 (diff)
parent     cc169165c82e14ea43e313f937a0a475ca97e588 (diff)
Merge tag 'rdma-for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband
Pull InfiniBand/RDMA changes from Roland Dreier:
- Add ocrdma hardware driver for Emulex IB-over-Ethernet adapters
- Add generic and mlx4 support for "raw" QPs: allow suitably privileged
  applications to send and receive arbitrary packets directly to/from
  the hardware (see the userspace sketch after this message)
- Add "doorbell drop" handling to the cxgb4 driver
- A fairly large batch of qib hardware driver changes
- A few fixes for lockdep-detected issues
- A few other miscellaneous fixes and cleanups
Fix up trivial conflict in drivers/net/ethernet/emulex/benet/be.h.
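The "raw" QP support mentioned above is exposed to userspace through libibverbs as the IBV_QPT_RAW_PACKET QP type. Below is a minimal sketch of creating such a QP; it is illustrative only, assuming the first ibverbs device on the system, arbitrary CQ and queue sizes, and sufficient privilege (typically CAP_NET_RAW), and it omits the QP state transitions and send/receive posting a real application would need.

/* Sketch: open the first ibverbs device and create a raw packet QP. */
#include <stdio.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **dev_list = ibv_get_device_list(NULL);
	if (!dev_list || !dev_list[0])
		return 1;

	struct ibv_context *ctx = ibv_open_device(dev_list[0]);
	if (!ctx)
		return 1;

	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	struct ibv_cq *cq = ibv_create_cq(ctx, 64, NULL, NULL, 0);
	if (!pd || !cq)
		return 1;

	struct ibv_qp_init_attr attr = {
		.send_cq = cq,
		.recv_cq = cq,
		.cap = {
			.max_send_wr  = 64,	/* arbitrary queue depths */
			.max_recv_wr  = 64,
			.max_send_sge = 1,
			.max_recv_sge = 1,
		},
		.qp_type = IBV_QPT_RAW_PACKET,	/* the "raw" QP type */
	};

	struct ibv_qp *qp = ibv_create_qp(pd, &attr);
	if (!qp) {
		perror("ibv_create_qp(IBV_QPT_RAW_PACKET)");
		return 1;
	}
	printf("raw packet QP created, qpn 0x%x\n", qp->qp_num);

	ibv_destroy_qp(qp);
	ibv_destroy_cq(cq);
	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(dev_list);
	return 0;
}

Build against libibverbs (e.g. cc raw_qp.c -libverbs); devices whose drivers lack raw packet support will simply fail the ibv_create_qp() call.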
* tag 'rdma-for-3.5' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband: (53 commits)
RDMA/cxgb4: Include vmalloc.h for vmalloc and vfree
IB/mlx4: Fix mlx4_ib_add() error flow
IB/core: Fix IB_SA_COMP_MASK macro
IB/iser: Fix error flow in iser ep connection establishment
IB/mlx4: Increase the number of vectors (EQs) available for ULPs
RDMA/cxgb4: Add query_qp support
RDMA/cxgb4: Remove kfifo usage
RDMA/cxgb4: Use vmalloc() for debugfs QP dump
RDMA/cxgb4: DB Drop Recovery for RDMA and LLD queues
RDMA/cxgb4: Disable interrupts in c4iw_ev_dispatch()
RDMA/cxgb4: Add DB Overflow Avoidance
RDMA/cxgb4: Add debugfs RDMA memory stats
cxgb4: DB Drop Recovery for RDMA and LLD queues
cxgb4: Common platform specific changes for DB Drop Recovery
cxgb4: Detect DB FULL events and notify RDMA ULD
RDMA/cxgb4: Drop peer_abort when no endpoint found
RDMA/cxgb4: Always wake up waiters in c4iw_peer_abort_intr()
mlx4_core: Change bitmap allocator to work in round-robin fashion
RDMA/nes: Don't call event handler if pointer is NULL
RDMA/nes: Fix for the ORD value of the connecting peer
...
Diffstat (limited to 'drivers/infiniband/hw/cxgb4/mem.c')
-rw-r--r--  drivers/infiniband/hw/cxgb4/mem.c  21
1 file changed, 14 insertions, 7 deletions
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 40c835309e49..57e07c61ace2 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -131,10 +131,14 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 	stag_idx = (*stag) >> 8;
 
 	if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {
-		stag_idx = c4iw_get_resource(&rdev->resource.tpt_fifo,
-					     &rdev->resource.tpt_fifo_lock);
+		stag_idx = c4iw_get_resource(&rdev->resource.tpt_table);
 		if (!stag_idx)
 			return -ENOMEM;
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.stag.cur += 32;
+		if (rdev->stats.stag.cur > rdev->stats.stag.max)
+			rdev->stats.stag.max = rdev->stats.stag.cur;
+		mutex_unlock(&rdev->stats.lock);
 		*stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
 	}
 	PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
@@ -165,9 +169,12 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
 				(rdev->lldi.vr->stag.start >> 5),
 				sizeof(tpt), &tpt);
 
-	if (reset_tpt_entry)
-		c4iw_put_resource(&rdev->resource.tpt_fifo, stag_idx,
-				  &rdev->resource.tpt_fifo_lock);
+	if (reset_tpt_entry) {
+		c4iw_put_resource(&rdev->resource.tpt_table, stag_idx);
+		mutex_lock(&rdev->stats.lock);
+		rdev->stats.stag.cur -= 32;
+		mutex_unlock(&rdev->stats.lock);
+	}
 	return err;
 }
 
@@ -686,8 +693,8 @@ int c4iw_dealloc_mw(struct ib_mw *mw)
 	mhp = to_c4iw_mw(mw);
 	rhp = mhp->rhp;
 	mmid = (mw->rkey) >> 8;
-	deallocate_window(&rhp->rdev, mhp->attr.stag);
 	remove_handle(rhp, &rhp->mmidr, mmid);
+	deallocate_window(&rhp->rdev, mhp->attr.stag);
 	kfree(mhp);
 	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
 	return 0;
@@ -789,12 +796,12 @@ int c4iw_dereg_mr(struct ib_mr *ib_mr)
 	mhp = to_c4iw_mr(ib_mr);
 	rhp = mhp->rhp;
 	mmid = mhp->attr.stag >> 8;
+	remove_handle(rhp, &rhp->mmidr, mmid);
 	dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
 		  mhp->attr.pbl_addr);
 	if (mhp->attr.pbl_size)
 		c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
 				  mhp->attr.pbl_size << 3);
-	remove_handle(rhp, &rhp->mmidr, mmid);
 	if (mhp->kva)
 		kfree((void *) (unsigned long) mhp->kva);
 	if (mhp->umem)
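The stag accounting added to write_tpt_entry() above follows a simple high-water-mark pattern: bump a current counter under a lock on allocation, remember the peak value, and decrement on free, so the new debugfs memory stats can report both live and maximum usage. A minimal userspace analogue of that pattern is sketched below, using a pthread mutex in place of the kernel's struct mutex; the type and field names are made up for illustration, not taken from the driver.

#include <pthread.h>
#include <stdio.h>

/* Illustrative analogue of the driver's per-resource stats counters. */
struct usage_stats {
	pthread_mutex_t lock;
	unsigned long cur;	/* units currently allocated */
	unsigned long max;	/* high-water mark */
};

static void stats_alloc(struct usage_stats *s, unsigned long units)
{
	pthread_mutex_lock(&s->lock);
	s->cur += units;
	if (s->cur > s->max)	/* record the peak, as the diff does for stags */
		s->max = s->cur;
	pthread_mutex_unlock(&s->lock);
}

static void stats_free(struct usage_stats *s, unsigned long units)
{
	pthread_mutex_lock(&s->lock);
	s->cur -= units;
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct usage_stats stag = { .lock = PTHREAD_MUTEX_INITIALIZER };

	stats_alloc(&stag, 32);	/* mirrors the "+= 32" per stag in the diff */
	stats_alloc(&stag, 32);
	stats_free(&stag, 32);
	printf("cur=%lu max=%lu\n", stag.cur, stag.max);	/* cur=32 max=64 */
	return 0;
}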