| author | Olaf Kirch <okir@lst.de> | 2008-04-29 16:46:53 -0400 |
|---|---|---|
| committer | Roland Dreier <rolandd@cisco.com> | 2008-04-29 16:46:53 -0400 |
| commit | 0bfe151cc4049f3f304adf28b37ea5437d02ad96 (patch) | |
| tree | 4e67ca79ec441896bd9cfde98268eb287987989e /drivers/infiniband | |
| parent | bbdc2821db041fb07ffa52e4a0e1ebb5410790e9 (diff) | |
IB/mthca: Avoid recycling old FMR R_Keys too soon
When an FMR is unmapped, mthca resets the map count to 0 and clears
the upper part of the R_Key, which is used as the sequence counter.
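
To make the mechanism concrete, here is a minimal user-space sketch of how such a key splits into an MPT index (low bits) and a sequence counter (the bits above it). The table size is hypothetical and a Tavor-style identity key mapping is assumed; the real logic lives in mthca_mr.c (tavor_key_to_hw_index() and the map_phys_fmr paths).

```c
/*
 * Illustration only, not kernel code.  Assumes a Tavor-style identity
 * mapping between R_Keys and MPT indices and a made-up table size;
 * mthca requires num_mpts to be a power of two.
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_MPTS (1u << 17)		/* stand-in for dev->limits.num_mpts */

int main(void)
{
	uint32_t key = 42;		/* MPT index assigned to one FMR */

	/* Each remap advances the key by the table size, so the bits
	 * above the index behave as a per-FMR sequence counter and
	 * previously issued R_Keys stop matching the MPT entry. */
	for (int map = 1; map <= 3; map++) {
		key += NUM_MPTS;
		printf("map %d: R_Key=0x%08x index=%u seq=%u\n",
		       map, key, key & (NUM_MPTS - 1), key / NUM_MPTS);
	}
	return 0;
}
```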
This poses a problem for RDS, which uses ib_fmr_unmap as a fence
operation. RDS assumes that after issuing an unmap, the old R_Keys
will be invalid for a "reasonable" period of time. For instance,
Oracle processes use shared memory buffers allocated from a pool of
buffers. When a process dies, we want to reclaim these buffers -- but
we must make sure there are no pending RDMA operations to/from those
buffers. The only way to achieve that is by using unmap and syncing
the TPT.
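
As a rough sketch of that fence idea (not actual RDS code), a reclaim path might look like the following, using the legacy ib_unmap_fmr() verb; the fence_and_reclaim() helper and its bookkeeping are hypothetical.

```c
/*
 * Hypothetical helper sketching the fence described above: before the
 * buffers of a dead process are reused, unmap the FMRs that cover them
 * so that any R_Key a remote peer still holds no longer reaches that
 * memory.  Uses the legacy ib_unmap_fmr() verb, which takes a list of
 * FMRs.
 */
#include <linux/list.h>
#include <rdma/ib_verbs.h>

static int fence_and_reclaim(struct ib_fmr **fmrs, int nr_fmrs)
{
	LIST_HEAD(fmr_list);
	int i, ret;

	for (i = 0; i < nr_fmrs; i++)
		list_add_tail(&fmrs[i]->list, &fmr_list);

	/* The fence only works if the unmapped R_Keys are not handed
	 * out again right away by the next map operation. */
	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		return ret;

	/* The buffers can now be returned to the shared pool. */
	return 0;
}
```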
However, when the sequence count is reset on unmap, there is a high
likelihood that a new mapping will be given the same R_Key that was
issued a few milliseconds ago.
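
Continuing the hypothetical arithmetic from the earlier sketch: with the old reset, the very next mapping reproduces an R_Key from the previous cycle, while the patched behaviour keeps the counter climbing.

```c
/* Same hypothetical constants as the earlier sketch. */
#include <stdio.h>
#include <stdint.h>

#define NUM_MPTS (1u << 17)		/* stand-in for dev->limits.num_mpts */

int main(void)
{
	uint32_t index = 42;
	uint32_t key = index + NUM_MPTS;	/* R_Key from the first map */

	/* Old unmap path: mask the sequence bits away ... */
	uint32_t reset = key & (NUM_MPTS - 1);
	/* ... so the next map reissues the R_Key from a moment ago. */
	printf("with reset:    next map gives 0x%08x (same as before: 0x%08x)\n",
	       reset + NUM_MPTS, key);

	/* Patched unmap path: leave the key alone, so the next map moves
	 * on and the stale R_Key stays invalid until the counter wraps. */
	printf("without reset: next map gives 0x%08x\n", key + NUM_MPTS);
	return 0;
}
```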
To prevent this, don't reset the sequence count when unmapping an FMR.
Signed-off-by: Olaf Kirch <olaf.kirch@oracle.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--  drivers/infiniband/hw/mthca/mthca_mr.c | 13
1 file changed, 0 insertions, 13 deletions
```diff
diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
index 3538da16e3fe..820205dec560 100644
--- a/drivers/infiniband/hw/mthca/mthca_mr.c
+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
@@ -818,15 +818,9 @@ int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
 
 void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
 {
-	u32 key;
-
 	if (!fmr->maps)
 		return;
 
-	key = tavor_key_to_hw_index(fmr->ibmr.lkey);
-	key &= dev->limits.num_mpts - 1;
-	fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);
-
 	fmr->maps = 0;
 
 	writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
@@ -834,16 +828,9 @@ void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
 
 void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
 {
-	u32 key;
-
 	if (!fmr->maps)
 		return;
 
-	key = arbel_key_to_hw_index(fmr->ibmr.lkey);
-	key &= dev->limits.num_mpts - 1;
-	key = adjust_key(dev, key);
-	fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);
-
 	fmr->maps = 0;
 
 	*(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
```