aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/infiniband/hw/qib/qib_verbs.h
diff options
context:
space:
mode:
authorMike Marciniszyn <mike.marciniszyn@intel.com>2012-06-27 18:33:12 -0400
committerRoland Dreier <roland@purestorage.com>2012-07-08 21:05:19 -0400
commit6a82649f217023863d6b1740017e6c3dd6685327 (patch)
treeb11e8bd8993d4db88a386336172faf0215ba724d /drivers/infiniband/hw/qib/qib_verbs.h
parent354dff1bd8ccd41b6e8421226d586d35e7fb8920 (diff)
IB/qib: Avoid returning EBUSY from MR deregister
A timing issue can occur where qib_mr_dereg() can return -EBUSY if the MR use count is not zero. This can occur if the MR is deregistered while RDMA read response packets are being progressed from the SDMA ring. The suspicion is that the peer sent an RDMA read request, which has already been copied across to the peer. The peer sees the completion of its request and then communicates to the responder that the MR is no longer needed. The responder tries to deregister the MR, catching some responses remaining in the SDMA ring that still hold the MR use count.

The code now uses a get/put paradigm to track MR use counts and coordinates with the MR deregistration process using a completion when the count has reached zero. A timeout on the delay is in place to catch other EBUSY issues.

The reference count protocol is as follows:
- The reference returned to the user counts as 1.
- A reference from the lk_table or the qib_ibdev counts as 1.
- Transient I/O operations increase/decrease the count as necessary.

A lot of code duplication has been folded into the new routines init_qib_mregion() and deinit_qib_mregion(). Additionally, explicit initialization of fields to zero is now handled by kzalloc(). Also, the duplicated 'while.*num_sge' code that decrements reference counts has been consolidated in qib_put_ss().

Reviewed-by: Ramkrishna Vepa <ramkrishna.vepa@intel.com> Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com> Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_verbs.h')
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.h28
1 files changed, 26 insertions, 2 deletions
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 487606024659..4a2277bc059e 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -41,6 +41,7 @@
41#include <linux/interrupt.h> 41#include <linux/interrupt.h>
42#include <linux/kref.h> 42#include <linux/kref.h>
43#include <linux/workqueue.h> 43#include <linux/workqueue.h>
44#include <linux/completion.h>
44#include <rdma/ib_pack.h> 45#include <rdma/ib_pack.h>
45#include <rdma/ib_user_verbs.h> 46#include <rdma/ib_user_verbs.h>
46 47
@@ -302,6 +303,8 @@ struct qib_mregion {
302 u32 max_segs; /* number of qib_segs in all the arrays */ 303 u32 max_segs; /* number of qib_segs in all the arrays */
303 u32 mapsz; /* size of the map array */ 304 u32 mapsz; /* size of the map array */
304 u8 page_shift; /* 0 - non unform/non powerof2 sizes */ 305 u8 page_shift; /* 0 - non unform/non powerof2 sizes */
306 u8 lkey_published; /* in global table */
307 struct completion comp; /* complete when refcount goes to zero */
305 atomic_t refcount; 308 atomic_t refcount;
306 struct qib_segarray *map[0]; /* the segments */ 309 struct qib_segarray *map[0]; /* the segments */
307}; 310};
@@ -944,9 +947,9 @@ int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr);
944void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, 947void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr,
945 int has_grh, void *data, u32 tlen, struct qib_qp *qp); 948 int has_grh, void *data, u32 tlen, struct qib_qp *qp);
946 949
947int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr); 950int qib_alloc_lkey(struct qib_mregion *mr, int dma_region);
948 951
949int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr); 952void qib_free_lkey(struct qib_mregion *mr);
950 953
951int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, 954int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
952 struct qib_sge *isge, struct ib_sge *sge, int acc); 955 struct qib_sge *isge, struct ib_sge *sge, int acc);
@@ -1014,6 +1017,27 @@ int qib_unmap_fmr(struct list_head *fmr_list);
1014 1017
1015int qib_dealloc_fmr(struct ib_fmr *ibfmr); 1018int qib_dealloc_fmr(struct ib_fmr *ibfmr);
1016 1019
1020static inline void qib_get_mr(struct qib_mregion *mr)
1021{
1022 atomic_inc(&mr->refcount);
1023}
1024
1025static inline void qib_put_mr(struct qib_mregion *mr)
1026{
1027 if (unlikely(atomic_dec_and_test(&mr->refcount)))
1028 complete(&mr->comp);
1029}
1030
1031static inline void qib_put_ss(struct qib_sge_state *ss)
1032{
1033 while (ss->num_sge) {
1034 qib_put_mr(ss->sge.mr);
1035 if (--ss->num_sge)
1036 ss->sge = *ss->sg_list++;
1037 }
1038}
1039
1040
1017void qib_release_mmap_info(struct kref *ref); 1041void qib_release_mmap_info(struct kref *ref);
1018 1042
1019struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size, 1043struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size,