author     Mike Marciniszyn <mike.marciniszyn@intel.com>   2012-06-27 18:33:12 -0400
committer  Roland Dreier <roland@purestorage.com>          2012-07-08 21:05:19 -0400
commit     6a82649f217023863d6b1740017e6c3dd6685327
tree       b11e8bd8993d4db88a386336172faf0215ba724d /drivers/infiniband
parent     354dff1bd8ccd41b6e8421226d586d35e7fb8920
IB/qib: Avoid returning EBUSY from MR deregister
A timing issue can cause qib_dereg_mr() to return -EBUSY if the MR
use count is not zero.
This can occur if the MR is de-registered while RDMA read response
packets are still being progressed from the SDMA ring. The suspected
sequence is that the peer sent an RDMA read request whose response
data has already been copied across to it. The peer sees the
completion of its request and tells the responder that the MR is no
longer needed. The responder then tries to de-register the MR while
responses remaining in the SDMA ring still hold the MR use count.
The code now uses a get/put paradigm to track MR use counts and
coordinates with MR de-registration by signalling a completion when
the count reaches zero. The wait for that completion is bounded by a
timeout so that other EBUSY causes are still caught.
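In outline, the new helpers and the de-registration wait look like
this (condensed from the qib_verbs.h and qib_mr.c hunks below):

  static inline void qib_get_mr(struct qib_mregion *mr)
  {
          atomic_inc(&mr->refcount);
  }

  static inline void qib_put_mr(struct qib_mregion *mr)
  {
          /* dropping the last reference wakes the waiter in deregister */
          if (unlikely(atomic_dec_and_test(&mr->refcount)))
                  complete(&mr->comp);
  }

  /* in qib_dereg_mr() and qib_dealloc_fmr(): drop references, then wait */
  qib_free_lkey(&mr->mr);                 /* drops the lk_table reference */
  qib_put_mr(&mr->mr);                    /* will set completion if last */
  timeout = wait_for_completion_timeout(&mr->mr.comp, 5 * HZ);
  if (!timeout) {
          qib_get_mr(&mr->mr);            /* restore the reference */
          ret = -EBUSY;
  }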
The reference count protocol is as follows:
- The reference returned to the user counts as 1.
- A reference from the lk_table or the qib_ibdev counts as 1.
- Transient I/O operations increase/decrease the count as necessary.
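Ignoring locking and error paths, the count on an ordinary user MR
therefore evolves roughly as follows (an illustration, not code from
the patch):

  init_qib_mregion(&mr->mr, pd, n);   /* refcount = 1: the user's reference      */
  qib_alloc_lkey(&mr->mr, 0);         /* published in the lk_table: refcount = 2 */
  /* each in-flight SGE takes qib_get_mr()/qib_put_mr(): 2 + N transient refs */
  qib_free_lkey(&mr->mr);             /* table reference dropped                 */
  qib_put_mr(&mr->mr);                /* user reference dropped; completes at 0  */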
Much duplicated code has been folded into the new routines
init_qib_mregion() and deinit_qib_mregion(). Additionally, explicit
zero-initialization of fields is now handled by kzalloc().
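The shared setup now looks roughly like this (condensed from the
qib_mr.c hunk below; the actual routine reports -ENOMEM through a
common exit path):

  static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, int count)
  {
          int m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ;
          int i;

          for (i = 0; i < m; i++) {
                  mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL);
                  if (!mr->map[i])
                          goto bail;
          }
          mr->mapsz = m;
          init_completion(&mr->comp);
          atomic_set(&mr->refcount, 1);   /* the reference returned to the user */
          mr->pd = pd;
          mr->max_segs = count;
          return 0;
  bail:
          while (i)
                  kfree(mr->map[--i]);
          return -ENOMEM;
  }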
Also, the duplicated 'while (...num_sge)' loops that decrement
reference counts have been consolidated into qib_put_ss().
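The helper and a typical call-site change (from the qib_verbs.h and
qib_rc.c hunks below):

  static inline void qib_put_ss(struct qib_sge_state *ss)
  {
          while (ss->num_sge) {
                  qib_put_mr(ss->sge.mr);
                  if (--ss->num_sge)
                          ss->sge = *ss->sg_list++;
          }
  }

  /* callers such as qib_rc_rcv() now simply do */
  qib_put_ss(&qp->r_sge);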
Reviewed-by: Ramkrishna Vepa <ramkrishna.vepa@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
Diffstat (limited to 'drivers/infiniband')
 drivers/infiniband/hw/qib/qib_keys.c  |  84
 drivers/infiniband/hw/qib/qib_mr.c    | 242
 drivers/infiniband/hw/qib/qib_qp.c    |  21
 drivers/infiniband/hw/qib/qib_rc.c    |  24
 drivers/infiniband/hw/qib/qib_ruc.c   |  14
 drivers/infiniband/hw/qib/qib_uc.c    |  33
 drivers/infiniband/hw/qib/qib_ud.c    |  12
 drivers/infiniband/hw/qib/qib_verbs.c |  10
 drivers/infiniband/hw/qib/qib_verbs.h |  28
 9 files changed, 244 insertions(+), 224 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 8fd19a47df0c..8b5ee3aa8e3d 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -35,21 +35,40 @@ | |||
35 | 35 | ||
36 | /** | 36 | /** |
37 | * qib_alloc_lkey - allocate an lkey | 37 | * qib_alloc_lkey - allocate an lkey |
38 | * @rkt: lkey table in which to allocate the lkey | ||
39 | * @mr: memory region that this lkey protects | 38 | * @mr: memory region that this lkey protects |
39 | * @dma_region: 0->normal key, 1->restricted DMA key | ||
40 | * | ||
41 | * Returns 0 if successful, otherwise returns -errno. | ||
42 | * | ||
43 | * Increments mr reference count and sets published | ||
44 | * as required. | ||
45 | * | ||
46 | * Sets the lkey field mr for non-dma regions. | ||
40 | * | 47 | * |
41 | * Returns 1 if successful, otherwise returns 0. | ||
42 | */ | 48 | */ |
43 | 49 | ||
44 | int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) | 50 | int qib_alloc_lkey(struct qib_mregion *mr, int dma_region) |
45 | { | 51 | { |
46 | unsigned long flags; | 52 | unsigned long flags; |
47 | u32 r; | 53 | u32 r; |
48 | u32 n; | 54 | u32 n; |
49 | int ret; | 55 | int ret = 0; |
56 | struct qib_ibdev *dev = to_idev(mr->pd->device); | ||
57 | struct qib_lkey_table *rkt = &dev->lk_table; | ||
50 | 58 | ||
51 | spin_lock_irqsave(&rkt->lock, flags); | 59 | spin_lock_irqsave(&rkt->lock, flags); |
52 | 60 | ||
61 | /* special case for dma_mr lkey == 0 */ | ||
62 | if (dma_region) { | ||
63 | /* should the dma_mr be relative to the pd? */ | ||
64 | if (!dev->dma_mr) { | ||
65 | qib_get_mr(mr); | ||
66 | dev->dma_mr = mr; | ||
67 | mr->lkey_published = 1; | ||
68 | } | ||
69 | goto success; | ||
70 | } | ||
71 | |||
53 | /* Find the next available LKEY */ | 72 | /* Find the next available LKEY */ |
54 | r = rkt->next; | 73 | r = rkt->next; |
55 | n = r; | 74 | n = r; |
@@ -57,11 +76,8 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) | |||
57 | if (rkt->table[r] == NULL) | 76 | if (rkt->table[r] == NULL) |
58 | break; | 77 | break; |
59 | r = (r + 1) & (rkt->max - 1); | 78 | r = (r + 1) & (rkt->max - 1); |
60 | if (r == n) { | 79 | if (r == n) |
61 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
62 | ret = 0; | ||
63 | goto bail; | 80 | goto bail; |
64 | } | ||
65 | } | 81 | } |
66 | rkt->next = (r + 1) & (rkt->max - 1); | 82 | rkt->next = (r + 1) & (rkt->max - 1); |
67 | /* | 83 | /* |
@@ -76,46 +92,50 @@ int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) | |||
76 | mr->lkey |= 1 << 8; | 92 | mr->lkey |= 1 << 8; |
77 | rkt->gen++; | 93 | rkt->gen++; |
78 | } | 94 | } |
95 | qib_get_mr(mr); | ||
79 | rkt->table[r] = mr; | 96 | rkt->table[r] = mr; |
97 | mr->lkey_published = 1; | ||
98 | success: | ||
80 | spin_unlock_irqrestore(&rkt->lock, flags); | 99 | spin_unlock_irqrestore(&rkt->lock, flags); |
81 | 100 | out: | |
82 | ret = 1; | ||
83 | |||
84 | bail: | ||
85 | return ret; | 101 | return ret; |
102 | bail: | ||
103 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
104 | ret = -ENOMEM; | ||
105 | goto out; | ||
86 | } | 106 | } |
87 | 107 | ||
88 | /** | 108 | /** |
89 | * qib_free_lkey - free an lkey | 109 | * qib_free_lkey - free an lkey |
90 | * @rkt: table from which to free the lkey | 110 | * @mr: mr to free from tables |
91 | * @lkey: lkey id to free | ||
92 | */ | 111 | */ |
93 | int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr) | 112 | void qib_free_lkey(struct qib_mregion *mr) |
94 | { | 113 | { |
95 | unsigned long flags; | 114 | unsigned long flags; |
96 | u32 lkey = mr->lkey; | 115 | u32 lkey = mr->lkey; |
97 | u32 r; | 116 | u32 r; |
98 | int ret; | 117 | struct qib_ibdev *dev = to_idev(mr->pd->device); |
118 | struct qib_lkey_table *rkt = &dev->lk_table; | ||
119 | |||
120 | spin_lock_irqsave(&rkt->lock, flags); | ||
121 | if (!mr->lkey_published) | ||
122 | goto out; | ||
123 | mr->lkey_published = 0; | ||
124 | |||
99 | 125 | ||
100 | spin_lock_irqsave(&dev->lk_table.lock, flags); | 126 | spin_lock_irqsave(&dev->lk_table.lock, flags); |
101 | if (lkey == 0) { | 127 | if (lkey == 0) { |
102 | if (dev->dma_mr && dev->dma_mr == mr) { | 128 | if (dev->dma_mr && dev->dma_mr == mr) { |
103 | ret = atomic_read(&dev->dma_mr->refcount); | 129 | qib_put_mr(dev->dma_mr); |
104 | if (!ret) | 130 | dev->dma_mr = NULL; |
105 | dev->dma_mr = NULL; | 131 | } |
106 | } else | ||
107 | ret = 0; | ||
108 | } else { | 132 | } else { |
109 | r = lkey >> (32 - ib_qib_lkey_table_size); | 133 | r = lkey >> (32 - ib_qib_lkey_table_size); |
110 | ret = atomic_read(&dev->lk_table.table[r]->refcount); | 134 | qib_put_mr(dev->dma_mr); |
111 | if (!ret) | 135 | rkt->table[r] = NULL; |
112 | dev->lk_table.table[r] = NULL; | ||
113 | } | 136 | } |
137 | out: | ||
114 | spin_unlock_irqrestore(&dev->lk_table.lock, flags); | 138 | spin_unlock_irqrestore(&dev->lk_table.lock, flags); |
115 | |||
116 | if (ret) | ||
117 | ret = -EBUSY; | ||
118 | return ret; | ||
119 | } | 139 | } |
120 | 140 | ||
121 | /** | 141 | /** |
@@ -150,7 +170,7 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
150 | goto bail; | 170 | goto bail; |
151 | if (!dev->dma_mr) | 171 | if (!dev->dma_mr) |
152 | goto bail; | 172 | goto bail; |
153 | atomic_inc(&dev->dma_mr->refcount); | 173 | qib_get_mr(dev->dma_mr); |
154 | spin_unlock_irqrestore(&rkt->lock, flags); | 174 | spin_unlock_irqrestore(&rkt->lock, flags); |
155 | 175 | ||
156 | isge->mr = dev->dma_mr; | 176 | isge->mr = dev->dma_mr; |
@@ -171,7 +191,7 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | |||
171 | off + sge->length > mr->length || | 191 | off + sge->length > mr->length || |
172 | (mr->access_flags & acc) != acc)) | 192 | (mr->access_flags & acc) != acc)) |
173 | goto bail; | 193 | goto bail; |
174 | atomic_inc(&mr->refcount); | 194 | qib_get_mr(mr); |
175 | spin_unlock_irqrestore(&rkt->lock, flags); | 195 | spin_unlock_irqrestore(&rkt->lock, flags); |
176 | 196 | ||
177 | off += mr->offset; | 197 | off += mr->offset; |
@@ -245,7 +265,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
245 | goto bail; | 265 | goto bail; |
246 | if (!dev->dma_mr) | 266 | if (!dev->dma_mr) |
247 | goto bail; | 267 | goto bail; |
248 | atomic_inc(&dev->dma_mr->refcount); | 268 | qib_get_mr(dev->dma_mr); |
249 | spin_unlock_irqrestore(&rkt->lock, flags); | 269 | spin_unlock_irqrestore(&rkt->lock, flags); |
250 | 270 | ||
251 | sge->mr = dev->dma_mr; | 271 | sge->mr = dev->dma_mr; |
@@ -265,7 +285,7 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | |||
265 | if (unlikely(vaddr < mr->iova || off + len > mr->length || | 285 | if (unlikely(vaddr < mr->iova || off + len > mr->length || |
266 | (mr->access_flags & acc) == 0)) | 286 | (mr->access_flags & acc) == 0)) |
267 | goto bail; | 287 | goto bail; |
268 | atomic_inc(&mr->refcount); | 288 | qib_get_mr(mr); |
269 | spin_unlock_irqrestore(&rkt->lock, flags); | 289 | spin_unlock_irqrestore(&rkt->lock, flags); |
270 | 290 | ||
271 | off += mr->offset; | 291 | off += mr->offset; |
diff --git a/drivers/infiniband/hw/qib/qib_mr.c b/drivers/infiniband/hw/qib/qib_mr.c
index 08944e2ee334..6a2028a56e3d 100644
--- a/drivers/infiniband/hw/qib/qib_mr.c
+++ b/drivers/infiniband/hw/qib/qib_mr.c
@@ -47,6 +47,43 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr) | |||
47 | return container_of(ibfmr, struct qib_fmr, ibfmr); | 47 | return container_of(ibfmr, struct qib_fmr, ibfmr); |
48 | } | 48 | } |
49 | 49 | ||
50 | static int init_qib_mregion(struct qib_mregion *mr, struct ib_pd *pd, | ||
51 | int count) | ||
52 | { | ||
53 | int m, i = 0; | ||
54 | int rval = 0; | ||
55 | |||
56 | m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; | ||
57 | for (; i < m; i++) { | ||
58 | mr->map[i] = kzalloc(sizeof *mr->map[0], GFP_KERNEL); | ||
59 | if (!mr->map[i]) | ||
60 | goto bail; | ||
61 | } | ||
62 | mr->mapsz = m; | ||
63 | init_completion(&mr->comp); | ||
64 | /* count returning the ptr to user */ | ||
65 | atomic_set(&mr->refcount, 1); | ||
66 | mr->pd = pd; | ||
67 | mr->max_segs = count; | ||
68 | out: | ||
69 | return rval; | ||
70 | bail: | ||
71 | while (i) | ||
72 | kfree(mr->map[--i]); | ||
73 | rval = -ENOMEM; | ||
74 | goto out; | ||
75 | } | ||
76 | |||
77 | static void deinit_qib_mregion(struct qib_mregion *mr) | ||
78 | { | ||
79 | int i = mr->mapsz; | ||
80 | |||
81 | mr->mapsz = 0; | ||
82 | while (i) | ||
83 | kfree(mr->map[--i]); | ||
84 | } | ||
85 | |||
86 | |||
50 | /** | 87 | /** |
51 | * qib_get_dma_mr - get a DMA memory region | 88 | * qib_get_dma_mr - get a DMA memory region |
52 | * @pd: protection domain for this memory region | 89 | * @pd: protection domain for this memory region |
@@ -58,10 +95,9 @@ static inline struct qib_fmr *to_ifmr(struct ib_fmr *ibfmr) | |||
58 | */ | 95 | */ |
59 | struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) | 96 | struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) |
60 | { | 97 | { |
61 | struct qib_ibdev *dev = to_idev(pd->device); | 98 | struct qib_mr *mr = NULL; |
62 | struct qib_mr *mr; | ||
63 | struct ib_mr *ret; | 99 | struct ib_mr *ret; |
64 | unsigned long flags; | 100 | int rval; |
65 | 101 | ||
66 | if (to_ipd(pd)->user) { | 102 | if (to_ipd(pd)->user) { |
67 | ret = ERR_PTR(-EPERM); | 103 | ret = ERR_PTR(-EPERM); |
@@ -74,61 +110,64 @@ struct ib_mr *qib_get_dma_mr(struct ib_pd *pd, int acc) | |||
74 | goto bail; | 110 | goto bail; |
75 | } | 111 | } |
76 | 112 | ||
77 | mr->mr.access_flags = acc; | 113 | rval = init_qib_mregion(&mr->mr, pd, 0); |
78 | atomic_set(&mr->mr.refcount, 0); | 114 | if (rval) { |
115 | ret = ERR_PTR(rval); | ||
116 | goto bail; | ||
117 | } | ||
79 | 118 | ||
80 | spin_lock_irqsave(&dev->lk_table.lock, flags); | ||
81 | if (!dev->dma_mr) | ||
82 | dev->dma_mr = &mr->mr; | ||
83 | spin_unlock_irqrestore(&dev->lk_table.lock, flags); | ||
84 | 119 | ||
120 | rval = qib_alloc_lkey(&mr->mr, 1); | ||
121 | if (rval) { | ||
122 | ret = ERR_PTR(rval); | ||
123 | goto bail_mregion; | ||
124 | } | ||
125 | |||
126 | mr->mr.access_flags = acc; | ||
85 | ret = &mr->ibmr; | 127 | ret = &mr->ibmr; |
128 | done: | ||
129 | return ret; | ||
86 | 130 | ||
131 | bail_mregion: | ||
132 | deinit_qib_mregion(&mr->mr); | ||
87 | bail: | 133 | bail: |
88 | return ret; | 134 | kfree(mr); |
135 | goto done; | ||
89 | } | 136 | } |
90 | 137 | ||
91 | static struct qib_mr *alloc_mr(int count, struct qib_lkey_table *lk_table) | 138 | static struct qib_mr *alloc_mr(int count, struct ib_pd *pd) |
92 | { | 139 | { |
93 | struct qib_mr *mr; | 140 | struct qib_mr *mr; |
94 | int m, i = 0; | 141 | int rval = -ENOMEM; |
142 | int m; | ||
95 | 143 | ||
96 | /* Allocate struct plus pointers to first level page tables. */ | 144 | /* Allocate struct plus pointers to first level page tables. */ |
97 | m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; | 145 | m = (count + QIB_SEGSZ - 1) / QIB_SEGSZ; |
98 | mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); | 146 | mr = kzalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); |
99 | if (!mr) | 147 | if (!mr) |
100 | goto done; | 148 | goto bail; |
101 | |||
102 | /* Allocate first level page tables. */ | ||
103 | for (; i < m; i++) { | ||
104 | mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL); | ||
105 | if (!mr->mr.map[i]) | ||
106 | goto bail; | ||
107 | } | ||
108 | mr->mr.mapsz = m; | ||
109 | mr->mr.page_shift = 0; | ||
110 | mr->mr.max_segs = count; | ||
111 | 149 | ||
150 | rval = init_qib_mregion(&mr->mr, pd, count); | ||
151 | if (rval) | ||
152 | goto bail; | ||
112 | /* | 153 | /* |
113 | * ib_reg_phys_mr() will initialize mr->ibmr except for | 154 | * ib_reg_phys_mr() will initialize mr->ibmr except for |
114 | * lkey and rkey. | 155 | * lkey and rkey. |
115 | */ | 156 | */ |
116 | if (!qib_alloc_lkey(lk_table, &mr->mr)) | 157 | rval = qib_alloc_lkey(&mr->mr, 0); |
117 | goto bail; | 158 | if (rval) |
159 | goto bail_mregion; | ||
118 | mr->ibmr.lkey = mr->mr.lkey; | 160 | mr->ibmr.lkey = mr->mr.lkey; |
119 | mr->ibmr.rkey = mr->mr.lkey; | 161 | mr->ibmr.rkey = mr->mr.lkey; |
162 | done: | ||
163 | return mr; | ||
120 | 164 | ||
121 | atomic_set(&mr->mr.refcount, 0); | 165 | bail_mregion: |
122 | goto done; | 166 | deinit_qib_mregion(&mr->mr); |
123 | |||
124 | bail: | 167 | bail: |
125 | while (i) | ||
126 | kfree(mr->mr.map[--i]); | ||
127 | kfree(mr); | 168 | kfree(mr); |
128 | mr = NULL; | 169 | mr = ERR_PTR(rval); |
129 | 170 | goto done; | |
130 | done: | ||
131 | return mr; | ||
132 | } | 171 | } |
133 | 172 | ||
134 | /** | 173 | /** |
@@ -148,19 +187,15 @@ struct ib_mr *qib_reg_phys_mr(struct ib_pd *pd, | |||
148 | int n, m, i; | 187 | int n, m, i; |
149 | struct ib_mr *ret; | 188 | struct ib_mr *ret; |
150 | 189 | ||
151 | mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table); | 190 | mr = alloc_mr(num_phys_buf, pd); |
152 | if (mr == NULL) { | 191 | if (IS_ERR(mr)) { |
153 | ret = ERR_PTR(-ENOMEM); | 192 | ret = (struct ib_mr *)mr; |
154 | goto bail; | 193 | goto bail; |
155 | } | 194 | } |
156 | 195 | ||
157 | mr->mr.pd = pd; | ||
158 | mr->mr.user_base = *iova_start; | 196 | mr->mr.user_base = *iova_start; |
159 | mr->mr.iova = *iova_start; | 197 | mr->mr.iova = *iova_start; |
160 | mr->mr.length = 0; | ||
161 | mr->mr.offset = 0; | ||
162 | mr->mr.access_flags = acc; | 198 | mr->mr.access_flags = acc; |
163 | mr->umem = NULL; | ||
164 | 199 | ||
165 | m = 0; | 200 | m = 0; |
166 | n = 0; | 201 | n = 0; |
@@ -186,7 +221,6 @@ bail: | |||
186 | * @pd: protection domain for this memory region | 221 | * @pd: protection domain for this memory region |
187 | * @start: starting userspace address | 222 | * @start: starting userspace address |
188 | * @length: length of region to register | 223 | * @length: length of region to register |
189 | * @virt_addr: virtual address to use (from HCA's point of view) | ||
190 | * @mr_access_flags: access flags for this memory region | 224 | * @mr_access_flags: access flags for this memory region |
191 | * @udata: unused by the QLogic_IB driver | 225 | * @udata: unused by the QLogic_IB driver |
192 | * | 226 | * |
@@ -216,14 +250,13 @@ struct ib_mr *qib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, | |||
216 | list_for_each_entry(chunk, &umem->chunk_list, list) | 250 | list_for_each_entry(chunk, &umem->chunk_list, list) |
217 | n += chunk->nents; | 251 | n += chunk->nents; |
218 | 252 | ||
219 | mr = alloc_mr(n, &to_idev(pd->device)->lk_table); | 253 | mr = alloc_mr(n, pd); |
220 | if (!mr) { | 254 | if (IS_ERR(mr)) { |
221 | ret = ERR_PTR(-ENOMEM); | 255 | ret = (struct ib_mr *)mr; |
222 | ib_umem_release(umem); | 256 | ib_umem_release(umem); |
223 | goto bail; | 257 | goto bail; |
224 | } | 258 | } |
225 | 259 | ||
226 | mr->mr.pd = pd; | ||
227 | mr->mr.user_base = start; | 260 | mr->mr.user_base = start; |
228 | mr->mr.iova = virt_addr; | 261 | mr->mr.iova = virt_addr; |
229 | mr->mr.length = length; | 262 | mr->mr.length = length; |
@@ -271,21 +304,25 @@ bail: | |||
271 | int qib_dereg_mr(struct ib_mr *ibmr) | 304 | int qib_dereg_mr(struct ib_mr *ibmr) |
272 | { | 305 | { |
273 | struct qib_mr *mr = to_imr(ibmr); | 306 | struct qib_mr *mr = to_imr(ibmr); |
274 | struct qib_ibdev *dev = to_idev(ibmr->device); | 307 | int ret = 0; |
275 | int ret; | 308 | unsigned long timeout; |
276 | int i; | 309 | |
277 | 310 | qib_free_lkey(&mr->mr); | |
278 | ret = qib_free_lkey(dev, &mr->mr); | 311 | |
279 | if (ret) | 312 | qib_put_mr(&mr->mr); /* will set completion if last */ |
280 | return ret; | 313 | timeout = wait_for_completion_timeout(&mr->mr.comp, |
281 | 314 | 5 * HZ); | |
282 | i = mr->mr.mapsz; | 315 | if (!timeout) { |
283 | while (i) | 316 | qib_get_mr(&mr->mr); |
284 | kfree(mr->mr.map[--i]); | 317 | ret = -EBUSY; |
318 | goto out; | ||
319 | } | ||
320 | deinit_qib_mregion(&mr->mr); | ||
285 | if (mr->umem) | 321 | if (mr->umem) |
286 | ib_umem_release(mr->umem); | 322 | ib_umem_release(mr->umem); |
287 | kfree(mr); | 323 | kfree(mr); |
288 | return 0; | 324 | out: |
325 | return ret; | ||
289 | } | 326 | } |
290 | 327 | ||
291 | /* | 328 | /* |
@@ -298,17 +335,9 @@ struct ib_mr *qib_alloc_fast_reg_mr(struct ib_pd *pd, int max_page_list_len) | |||
298 | { | 335 | { |
299 | struct qib_mr *mr; | 336 | struct qib_mr *mr; |
300 | 337 | ||
301 | mr = alloc_mr(max_page_list_len, &to_idev(pd->device)->lk_table); | 338 | mr = alloc_mr(max_page_list_len, pd); |
302 | if (mr == NULL) | 339 | if (IS_ERR(mr)) |
303 | return ERR_PTR(-ENOMEM); | 340 | return (struct ib_mr *)mr; |
304 | |||
305 | mr->mr.pd = pd; | ||
306 | mr->mr.user_base = 0; | ||
307 | mr->mr.iova = 0; | ||
308 | mr->mr.length = 0; | ||
309 | mr->mr.offset = 0; | ||
310 | mr->mr.access_flags = 0; | ||
311 | mr->umem = NULL; | ||
312 | 341 | ||
313 | return &mr->ibmr; | 342 | return &mr->ibmr; |
314 | } | 343 | } |
@@ -322,11 +351,11 @@ qib_alloc_fast_reg_page_list(struct ib_device *ibdev, int page_list_len) | |||
322 | if (size > PAGE_SIZE) | 351 | if (size > PAGE_SIZE) |
323 | return ERR_PTR(-EINVAL); | 352 | return ERR_PTR(-EINVAL); |
324 | 353 | ||
325 | pl = kmalloc(sizeof *pl, GFP_KERNEL); | 354 | pl = kzalloc(sizeof *pl, GFP_KERNEL); |
326 | if (!pl) | 355 | if (!pl) |
327 | return ERR_PTR(-ENOMEM); | 356 | return ERR_PTR(-ENOMEM); |
328 | 357 | ||
329 | pl->page_list = kmalloc(size, GFP_KERNEL); | 358 | pl->page_list = kzalloc(size, GFP_KERNEL); |
330 | if (!pl->page_list) | 359 | if (!pl->page_list) |
331 | goto err_free; | 360 | goto err_free; |
332 | 361 | ||
@@ -355,57 +384,47 @@ struct ib_fmr *qib_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | |||
355 | struct ib_fmr_attr *fmr_attr) | 384 | struct ib_fmr_attr *fmr_attr) |
356 | { | 385 | { |
357 | struct qib_fmr *fmr; | 386 | struct qib_fmr *fmr; |
358 | int m, i = 0; | 387 | int m; |
359 | struct ib_fmr *ret; | 388 | struct ib_fmr *ret; |
389 | int rval = -ENOMEM; | ||
360 | 390 | ||
361 | /* Allocate struct plus pointers to first level page tables. */ | 391 | /* Allocate struct plus pointers to first level page tables. */ |
362 | m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; | 392 | m = (fmr_attr->max_pages + QIB_SEGSZ - 1) / QIB_SEGSZ; |
363 | fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); | 393 | fmr = kzalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); |
364 | if (!fmr) | 394 | if (!fmr) |
365 | goto bail; | 395 | goto bail; |
366 | 396 | ||
367 | /* Allocate first level page tables. */ | 397 | rval = init_qib_mregion(&fmr->mr, pd, fmr_attr->max_pages); |
368 | for (; i < m; i++) { | 398 | if (rval) |
369 | fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0], | 399 | goto bail; |
370 | GFP_KERNEL); | ||
371 | if (!fmr->mr.map[i]) | ||
372 | goto bail; | ||
373 | } | ||
374 | fmr->mr.mapsz = m; | ||
375 | 400 | ||
376 | /* | 401 | /* |
377 | * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & | 402 | * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & |
378 | * rkey. | 403 | * rkey. |
379 | */ | 404 | */ |
380 | if (!qib_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr)) | 405 | rval = qib_alloc_lkey(&fmr->mr, 0); |
381 | goto bail; | 406 | if (rval) |
407 | goto bail_mregion; | ||
382 | fmr->ibfmr.rkey = fmr->mr.lkey; | 408 | fmr->ibfmr.rkey = fmr->mr.lkey; |
383 | fmr->ibfmr.lkey = fmr->mr.lkey; | 409 | fmr->ibfmr.lkey = fmr->mr.lkey; |
384 | /* | 410 | /* |
385 | * Resources are allocated but no valid mapping (RKEY can't be | 411 | * Resources are allocated but no valid mapping (RKEY can't be |
386 | * used). | 412 | * used). |
387 | */ | 413 | */ |
388 | fmr->mr.pd = pd; | ||
389 | fmr->mr.user_base = 0; | ||
390 | fmr->mr.iova = 0; | ||
391 | fmr->mr.length = 0; | ||
392 | fmr->mr.offset = 0; | ||
393 | fmr->mr.access_flags = mr_access_flags; | 414 | fmr->mr.access_flags = mr_access_flags; |
394 | fmr->mr.max_segs = fmr_attr->max_pages; | 415 | fmr->mr.max_segs = fmr_attr->max_pages; |
395 | fmr->mr.page_shift = fmr_attr->page_shift; | 416 | fmr->mr.page_shift = fmr_attr->page_shift; |
396 | 417 | ||
397 | atomic_set(&fmr->mr.refcount, 0); | ||
398 | ret = &fmr->ibfmr; | 418 | ret = &fmr->ibfmr; |
399 | goto done; | 419 | done: |
420 | return ret; | ||
400 | 421 | ||
422 | bail_mregion: | ||
423 | deinit_qib_mregion(&fmr->mr); | ||
401 | bail: | 424 | bail: |
402 | while (i) | ||
403 | kfree(fmr->mr.map[--i]); | ||
404 | kfree(fmr); | 425 | kfree(fmr); |
405 | ret = ERR_PTR(-ENOMEM); | 426 | ret = ERR_PTR(rval); |
406 | 427 | goto done; | |
407 | done: | ||
408 | return ret; | ||
409 | } | 428 | } |
410 | 429 | ||
411 | /** | 430 | /** |
@@ -428,7 +447,8 @@ int qib_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, | |||
428 | u32 ps; | 447 | u32 ps; |
429 | int ret; | 448 | int ret; |
430 | 449 | ||
431 | if (atomic_read(&fmr->mr.refcount)) | 450 | i = atomic_read(&fmr->mr.refcount); |
451 | if (i > 2) | ||
432 | return -EBUSY; | 452 | return -EBUSY; |
433 | 453 | ||
434 | if (list_len > fmr->mr.max_segs) { | 454 | if (list_len > fmr->mr.max_segs) { |
@@ -490,16 +510,20 @@ int qib_unmap_fmr(struct list_head *fmr_list) | |||
490 | int qib_dealloc_fmr(struct ib_fmr *ibfmr) | 510 | int qib_dealloc_fmr(struct ib_fmr *ibfmr) |
491 | { | 511 | { |
492 | struct qib_fmr *fmr = to_ifmr(ibfmr); | 512 | struct qib_fmr *fmr = to_ifmr(ibfmr); |
493 | int ret; | 513 | int ret = 0; |
494 | int i; | 514 | unsigned long timeout; |
495 | 515 | ||
496 | ret = qib_free_lkey(to_idev(ibfmr->device), &fmr->mr); | 516 | qib_free_lkey(&fmr->mr); |
497 | if (ret) | 517 | qib_put_mr(&fmr->mr); /* will set completion if last */ |
498 | return ret; | 518 | timeout = wait_for_completion_timeout(&fmr->mr.comp, |
499 | 519 | 5 * HZ); | |
500 | i = fmr->mr.mapsz; | 520 | if (!timeout) { |
501 | while (i) | 521 | qib_get_mr(&fmr->mr); |
502 | kfree(fmr->mr.map[--i]); | 522 | ret = -EBUSY; |
523 | goto out; | ||
524 | } | ||
525 | deinit_qib_mregion(&fmr->mr); | ||
503 | kfree(fmr); | 526 | kfree(fmr); |
504 | return 0; | 527 | out: |
528 | return ret; | ||
505 | } | 529 | } |
diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c
index 1ce56b51ab1a..693041b076f0 100644
--- a/drivers/infiniband/hw/qib/qib_qp.c
+++ b/drivers/infiniband/hw/qib/qib_qp.c
@@ -406,18 +406,9 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) | |||
406 | unsigned n; | 406 | unsigned n; |
407 | 407 | ||
408 | if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) | 408 | if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) |
409 | while (qp->s_rdma_read_sge.num_sge) { | 409 | qib_put_ss(&qp->s_rdma_read_sge); |
410 | atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); | ||
411 | if (--qp->s_rdma_read_sge.num_sge) | ||
412 | qp->s_rdma_read_sge.sge = | ||
413 | *qp->s_rdma_read_sge.sg_list++; | ||
414 | } | ||
415 | 410 | ||
416 | while (qp->r_sge.num_sge) { | 411 | qib_put_ss(&qp->r_sge); |
417 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
418 | if (--qp->r_sge.num_sge) | ||
419 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
420 | } | ||
421 | 412 | ||
422 | if (clr_sends) { | 413 | if (clr_sends) { |
423 | while (qp->s_last != qp->s_head) { | 414 | while (qp->s_last != qp->s_head) { |
@@ -427,7 +418,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) | |||
427 | for (i = 0; i < wqe->wr.num_sge; i++) { | 418 | for (i = 0; i < wqe->wr.num_sge; i++) { |
428 | struct qib_sge *sge = &wqe->sg_list[i]; | 419 | struct qib_sge *sge = &wqe->sg_list[i]; |
429 | 420 | ||
430 | atomic_dec(&sge->mr->refcount); | 421 | qib_put_mr(sge->mr); |
431 | } | 422 | } |
432 | if (qp->ibqp.qp_type == IB_QPT_UD || | 423 | if (qp->ibqp.qp_type == IB_QPT_UD || |
433 | qp->ibqp.qp_type == IB_QPT_SMI || | 424 | qp->ibqp.qp_type == IB_QPT_SMI || |
@@ -437,7 +428,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) | |||
437 | qp->s_last = 0; | 428 | qp->s_last = 0; |
438 | } | 429 | } |
439 | if (qp->s_rdma_mr) { | 430 | if (qp->s_rdma_mr) { |
440 | atomic_dec(&qp->s_rdma_mr->refcount); | 431 | qib_put_mr(qp->s_rdma_mr); |
441 | qp->s_rdma_mr = NULL; | 432 | qp->s_rdma_mr = NULL; |
442 | } | 433 | } |
443 | } | 434 | } |
@@ -450,7 +441,7 @@ static void clear_mr_refs(struct qib_qp *qp, int clr_sends) | |||
450 | 441 | ||
451 | if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && | 442 | if (e->opcode == IB_OPCODE_RC_RDMA_READ_REQUEST && |
452 | e->rdma_sge.mr) { | 443 | e->rdma_sge.mr) { |
453 | atomic_dec(&e->rdma_sge.mr->refcount); | 444 | qib_put_mr(e->rdma_sge.mr); |
454 | e->rdma_sge.mr = NULL; | 445 | e->rdma_sge.mr = NULL; |
455 | } | 446 | } |
456 | } | 447 | } |
@@ -495,7 +486,7 @@ int qib_error_qp(struct qib_qp *qp, enum ib_wc_status err) | |||
495 | if (!(qp->s_flags & QIB_S_BUSY)) { | 486 | if (!(qp->s_flags & QIB_S_BUSY)) { |
496 | qp->s_hdrwords = 0; | 487 | qp->s_hdrwords = 0; |
497 | if (qp->s_rdma_mr) { | 488 | if (qp->s_rdma_mr) { |
498 | atomic_dec(&qp->s_rdma_mr->refcount); | 489 | qib_put_mr(qp->s_rdma_mr); |
499 | qp->s_rdma_mr = NULL; | 490 | qp->s_rdma_mr = NULL; |
500 | } | 491 | } |
501 | if (qp->s_tx) { | 492 | if (qp->s_tx) { |
diff --git a/drivers/infiniband/hw/qib/qib_rc.c b/drivers/infiniband/hw/qib/qib_rc.c
index b641416148eb..3ab341320ead 100644
--- a/drivers/infiniband/hw/qib/qib_rc.c
+++ b/drivers/infiniband/hw/qib/qib_rc.c
@@ -95,7 +95,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, | |||
95 | case OP(RDMA_READ_RESPONSE_ONLY): | 95 | case OP(RDMA_READ_RESPONSE_ONLY): |
96 | e = &qp->s_ack_queue[qp->s_tail_ack_queue]; | 96 | e = &qp->s_ack_queue[qp->s_tail_ack_queue]; |
97 | if (e->rdma_sge.mr) { | 97 | if (e->rdma_sge.mr) { |
98 | atomic_dec(&e->rdma_sge.mr->refcount); | 98 | qib_put_mr(e->rdma_sge.mr); |
99 | e->rdma_sge.mr = NULL; | 99 | e->rdma_sge.mr = NULL; |
100 | } | 100 | } |
101 | /* FALLTHROUGH */ | 101 | /* FALLTHROUGH */ |
@@ -133,7 +133,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, | |||
133 | /* Copy SGE state in case we need to resend */ | 133 | /* Copy SGE state in case we need to resend */ |
134 | qp->s_rdma_mr = e->rdma_sge.mr; | 134 | qp->s_rdma_mr = e->rdma_sge.mr; |
135 | if (qp->s_rdma_mr) | 135 | if (qp->s_rdma_mr) |
136 | atomic_inc(&qp->s_rdma_mr->refcount); | 136 | qib_get_mr(qp->s_rdma_mr); |
137 | qp->s_ack_rdma_sge.sge = e->rdma_sge; | 137 | qp->s_ack_rdma_sge.sge = e->rdma_sge; |
138 | qp->s_ack_rdma_sge.num_sge = 1; | 138 | qp->s_ack_rdma_sge.num_sge = 1; |
139 | qp->s_cur_sge = &qp->s_ack_rdma_sge; | 139 | qp->s_cur_sge = &qp->s_ack_rdma_sge; |
@@ -172,7 +172,7 @@ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, | |||
172 | qp->s_cur_sge = &qp->s_ack_rdma_sge; | 172 | qp->s_cur_sge = &qp->s_ack_rdma_sge; |
173 | qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; | 173 | qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; |
174 | if (qp->s_rdma_mr) | 174 | if (qp->s_rdma_mr) |
175 | atomic_inc(&qp->s_rdma_mr->refcount); | 175 | qib_get_mr(qp->s_rdma_mr); |
176 | len = qp->s_ack_rdma_sge.sge.sge_length; | 176 | len = qp->s_ack_rdma_sge.sge.sge_length; |
177 | if (len > pmtu) | 177 | if (len > pmtu) |
178 | len = pmtu; | 178 | len = pmtu; |
@@ -1012,7 +1012,7 @@ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) | |||
1012 | for (i = 0; i < wqe->wr.num_sge; i++) { | 1012 | for (i = 0; i < wqe->wr.num_sge; i++) { |
1013 | struct qib_sge *sge = &wqe->sg_list[i]; | 1013 | struct qib_sge *sge = &wqe->sg_list[i]; |
1014 | 1014 | ||
1015 | atomic_dec(&sge->mr->refcount); | 1015 | qib_put_mr(sge->mr); |
1016 | } | 1016 | } |
1017 | /* Post a send completion queue entry if requested. */ | 1017 | /* Post a send completion queue entry if requested. */ |
1018 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || | 1018 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || |
@@ -1068,7 +1068,7 @@ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, | |||
1068 | for (i = 0; i < wqe->wr.num_sge; i++) { | 1068 | for (i = 0; i < wqe->wr.num_sge; i++) { |
1069 | struct qib_sge *sge = &wqe->sg_list[i]; | 1069 | struct qib_sge *sge = &wqe->sg_list[i]; |
1070 | 1070 | ||
1071 | atomic_dec(&sge->mr->refcount); | 1071 | qib_put_mr(sge->mr); |
1072 | } | 1072 | } |
1073 | /* Post a send completion queue entry if requested. */ | 1073 | /* Post a send completion queue entry if requested. */ |
1074 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || | 1074 | if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || |
@@ -1730,7 +1730,7 @@ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, | |||
1730 | if (unlikely(offset + len != e->rdma_sge.sge_length)) | 1730 | if (unlikely(offset + len != e->rdma_sge.sge_length)) |
1731 | goto unlock_done; | 1731 | goto unlock_done; |
1732 | if (e->rdma_sge.mr) { | 1732 | if (e->rdma_sge.mr) { |
1733 | atomic_dec(&e->rdma_sge.mr->refcount); | 1733 | qib_put_mr(e->rdma_sge.mr); |
1734 | e->rdma_sge.mr = NULL; | 1734 | e->rdma_sge.mr = NULL; |
1735 | } | 1735 | } |
1736 | if (len != 0) { | 1736 | if (len != 0) { |
@@ -2024,11 +2024,7 @@ send_last: | |||
2024 | if (unlikely(wc.byte_len > qp->r_len)) | 2024 | if (unlikely(wc.byte_len > qp->r_len)) |
2025 | goto nack_inv; | 2025 | goto nack_inv; |
2026 | qib_copy_sge(&qp->r_sge, data, tlen, 1); | 2026 | qib_copy_sge(&qp->r_sge, data, tlen, 1); |
2027 | while (qp->r_sge.num_sge) { | 2027 | qib_put_ss(&qp->r_sge); |
2028 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
2029 | if (--qp->r_sge.num_sge) | ||
2030 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
2031 | } | ||
2032 | qp->r_msn++; | 2028 | qp->r_msn++; |
2033 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | 2029 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) |
2034 | break; | 2030 | break; |
@@ -2116,7 +2112,7 @@ send_last: | |||
2116 | } | 2112 | } |
2117 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; | 2113 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; |
2118 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { | 2114 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { |
2119 | atomic_dec(&e->rdma_sge.mr->refcount); | 2115 | qib_put_mr(e->rdma_sge.mr); |
2120 | e->rdma_sge.mr = NULL; | 2116 | e->rdma_sge.mr = NULL; |
2121 | } | 2117 | } |
2122 | reth = &ohdr->u.rc.reth; | 2118 | reth = &ohdr->u.rc.reth; |
@@ -2188,7 +2184,7 @@ send_last: | |||
2188 | } | 2184 | } |
2189 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; | 2185 | e = &qp->s_ack_queue[qp->r_head_ack_queue]; |
2190 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { | 2186 | if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { |
2191 | atomic_dec(&e->rdma_sge.mr->refcount); | 2187 | qib_put_mr(e->rdma_sge.mr); |
2192 | e->rdma_sge.mr = NULL; | 2188 | e->rdma_sge.mr = NULL; |
2193 | } | 2189 | } |
2194 | ateth = &ohdr->u.atomic_eth; | 2190 | ateth = &ohdr->u.atomic_eth; |
@@ -2210,7 +2206,7 @@ send_last: | |||
2210 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, | 2206 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, |
2211 | be64_to_cpu(ateth->compare_data), | 2207 | be64_to_cpu(ateth->compare_data), |
2212 | sdata); | 2208 | sdata); |
2213 | atomic_dec(&qp->r_sge.sge.mr->refcount); | 2209 | qib_put_mr(qp->r_sge.sge.mr); |
2214 | qp->r_sge.num_sge = 0; | 2210 | qp->r_sge.num_sge = 0; |
2215 | e->opcode = opcode; | 2211 | e->opcode = opcode; |
2216 | e->sent = 0; | 2212 | e->sent = 0; |
diff --git a/drivers/infiniband/hw/qib/qib_ruc.c b/drivers/infiniband/hw/qib/qib_ruc.c
index c0ee7e095d81..357b6cfcd46c 100644
--- a/drivers/infiniband/hw/qib/qib_ruc.c
+++ b/drivers/infiniband/hw/qib/qib_ruc.c
@@ -110,7 +110,7 @@ bad_lkey: | |||
110 | while (j) { | 110 | while (j) { |
111 | struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; | 111 | struct qib_sge *sge = --j ? &ss->sg_list[j - 1] : &ss->sge; |
112 | 112 | ||
113 | atomic_dec(&sge->mr->refcount); | 113 | qib_put_mr(sge->mr); |
114 | } | 114 | } |
115 | ss->num_sge = 0; | 115 | ss->num_sge = 0; |
116 | memset(&wc, 0, sizeof(wc)); | 116 | memset(&wc, 0, sizeof(wc)); |
@@ -501,7 +501,7 @@ again: | |||
501 | (u64) atomic64_add_return(sdata, maddr) - sdata : | 501 | (u64) atomic64_add_return(sdata, maddr) - sdata : |
502 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, | 502 | (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, |
503 | sdata, wqe->wr.wr.atomic.swap); | 503 | sdata, wqe->wr.wr.atomic.swap); |
504 | atomic_dec(&qp->r_sge.sge.mr->refcount); | 504 | qib_put_mr(qp->r_sge.sge.mr); |
505 | qp->r_sge.num_sge = 0; | 505 | qp->r_sge.num_sge = 0; |
506 | goto send_comp; | 506 | goto send_comp; |
507 | 507 | ||
@@ -525,7 +525,7 @@ again: | |||
525 | sge->sge_length -= len; | 525 | sge->sge_length -= len; |
526 | if (sge->sge_length == 0) { | 526 | if (sge->sge_length == 0) { |
527 | if (!release) | 527 | if (!release) |
528 | atomic_dec(&sge->mr->refcount); | 528 | qib_put_mr(sge->mr); |
529 | if (--sqp->s_sge.num_sge) | 529 | if (--sqp->s_sge.num_sge) |
530 | *sge = *sqp->s_sge.sg_list++; | 530 | *sge = *sqp->s_sge.sg_list++; |
531 | } else if (sge->length == 0 && sge->mr->lkey) { | 531 | } else if (sge->length == 0 && sge->mr->lkey) { |
@@ -542,11 +542,7 @@ again: | |||
542 | sqp->s_len -= len; | 542 | sqp->s_len -= len; |
543 | } | 543 | } |
544 | if (release) | 544 | if (release) |
545 | while (qp->r_sge.num_sge) { | 545 | qib_put_ss(&qp->r_sge); |
546 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
547 | if (--qp->r_sge.num_sge) | ||
548 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
549 | } | ||
550 | 546 | ||
551 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | 547 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) |
552 | goto send_comp; | 548 | goto send_comp; |
@@ -782,7 +778,7 @@ void qib_send_complete(struct qib_qp *qp, struct qib_swqe *wqe, | |||
782 | for (i = 0; i < wqe->wr.num_sge; i++) { | 778 | for (i = 0; i < wqe->wr.num_sge; i++) { |
783 | struct qib_sge *sge = &wqe->sg_list[i]; | 779 | struct qib_sge *sge = &wqe->sg_list[i]; |
784 | 780 | ||
785 | atomic_dec(&sge->mr->refcount); | 781 | qib_put_mr(sge->mr); |
786 | } | 782 | } |
787 | if (qp->ibqp.qp_type == IB_QPT_UD || | 783 | if (qp->ibqp.qp_type == IB_QPT_UD || |
788 | qp->ibqp.qp_type == IB_QPT_SMI || | 784 | qp->ibqp.qp_type == IB_QPT_SMI || |
diff --git a/drivers/infiniband/hw/qib/qib_uc.c b/drivers/infiniband/hw/qib/qib_uc.c
index 70b4cb710f9a..aa3a8035bb68 100644
--- a/drivers/infiniband/hw/qib/qib_uc.c
+++ b/drivers/infiniband/hw/qib/qib_uc.c
@@ -281,11 +281,7 @@ inv: | |||
281 | set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); | 281 | set_bit(QIB_R_REWIND_SGE, &qp->r_aflags); |
282 | qp->r_sge.num_sge = 0; | 282 | qp->r_sge.num_sge = 0; |
283 | } else | 283 | } else |
284 | while (qp->r_sge.num_sge) { | 284 | qib_put_ss(&qp->r_sge); |
285 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
286 | if (--qp->r_sge.num_sge) | ||
287 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
288 | } | ||
289 | qp->r_state = OP(SEND_LAST); | 285 | qp->r_state = OP(SEND_LAST); |
290 | switch (opcode) { | 286 | switch (opcode) { |
291 | case OP(SEND_FIRST): | 287 | case OP(SEND_FIRST): |
@@ -404,12 +400,7 @@ send_last: | |||
404 | goto rewind; | 400 | goto rewind; |
405 | wc.opcode = IB_WC_RECV; | 401 | wc.opcode = IB_WC_RECV; |
406 | qib_copy_sge(&qp->r_sge, data, tlen, 0); | 402 | qib_copy_sge(&qp->r_sge, data, tlen, 0); |
407 | while (qp->s_rdma_read_sge.num_sge) { | 403 | qib_put_ss(&qp->s_rdma_read_sge); |
408 | atomic_dec(&qp->s_rdma_read_sge.sge.mr->refcount); | ||
409 | if (--qp->s_rdma_read_sge.num_sge) | ||
410 | qp->s_rdma_read_sge.sge = | ||
411 | *qp->s_rdma_read_sge.sg_list++; | ||
412 | } | ||
413 | last_imm: | 404 | last_imm: |
414 | wc.wr_id = qp->r_wr_id; | 405 | wc.wr_id = qp->r_wr_id; |
415 | wc.status = IB_WC_SUCCESS; | 406 | wc.status = IB_WC_SUCCESS; |
@@ -493,13 +484,7 @@ rdma_last_imm: | |||
493 | if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) | 484 | if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) |
494 | goto drop; | 485 | goto drop; |
495 | if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) | 486 | if (test_and_clear_bit(QIB_R_REWIND_SGE, &qp->r_aflags)) |
496 | while (qp->s_rdma_read_sge.num_sge) { | 487 | qib_put_ss(&qp->s_rdma_read_sge); |
497 | atomic_dec(&qp->s_rdma_read_sge.sge.mr-> | ||
498 | refcount); | ||
499 | if (--qp->s_rdma_read_sge.num_sge) | ||
500 | qp->s_rdma_read_sge.sge = | ||
501 | *qp->s_rdma_read_sge.sg_list++; | ||
502 | } | ||
503 | else { | 488 | else { |
504 | ret = qib_get_rwqe(qp, 1); | 489 | ret = qib_get_rwqe(qp, 1); |
505 | if (ret < 0) | 490 | if (ret < 0) |
@@ -510,11 +495,7 @@ rdma_last_imm: | |||
510 | wc.byte_len = qp->r_len; | 495 | wc.byte_len = qp->r_len; |
511 | wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; | 496 | wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; |
512 | qib_copy_sge(&qp->r_sge, data, tlen, 1); | 497 | qib_copy_sge(&qp->r_sge, data, tlen, 1); |
513 | while (qp->r_sge.num_sge) { | 498 | qib_put_ss(&qp->r_sge); |
514 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
515 | if (--qp->r_sge.num_sge) | ||
516 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
517 | } | ||
518 | goto last_imm; | 499 | goto last_imm; |
519 | 500 | ||
520 | case OP(RDMA_WRITE_LAST): | 501 | case OP(RDMA_WRITE_LAST): |
@@ -530,11 +511,7 @@ rdma_last: | |||
530 | if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) | 511 | if (unlikely(tlen + qp->r_rcv_len != qp->r_len)) |
531 | goto drop; | 512 | goto drop; |
532 | qib_copy_sge(&qp->r_sge, data, tlen, 1); | 513 | qib_copy_sge(&qp->r_sge, data, tlen, 1); |
533 | while (qp->r_sge.num_sge) { | 514 | qib_put_ss(&qp->r_sge); |
534 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
535 | if (--qp->r_sge.num_sge) | ||
536 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
537 | } | ||
538 | break; | 515 | break; |
539 | 516 | ||
540 | default: | 517 | default: |
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index a468bf2d4465..d6c7fe7f88d5 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -194,11 +194,7 @@ static void qib_ud_loopback(struct qib_qp *sqp, struct qib_swqe *swqe) | |||
194 | } | 194 | } |
195 | length -= len; | 195 | length -= len; |
196 | } | 196 | } |
197 | while (qp->r_sge.num_sge) { | 197 | qib_put_ss(&qp->r_sge); |
198 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
199 | if (--qp->r_sge.num_sge) | ||
200 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
201 | } | ||
202 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | 198 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) |
203 | goto bail_unlock; | 199 | goto bail_unlock; |
204 | wc.wr_id = qp->r_wr_id; | 200 | wc.wr_id = qp->r_wr_id; |
@@ -556,11 +552,7 @@ void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | |||
556 | } else | 552 | } else |
557 | qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); | 553 | qib_skip_sge(&qp->r_sge, sizeof(struct ib_grh), 1); |
558 | qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); | 554 | qib_copy_sge(&qp->r_sge, data, wc.byte_len - sizeof(struct ib_grh), 1); |
559 | while (qp->r_sge.num_sge) { | 555 | qib_put_ss(&qp->r_sge); |
560 | atomic_dec(&qp->r_sge.sge.mr->refcount); | ||
561 | if (--qp->r_sge.num_sge) | ||
562 | qp->r_sge.sge = *qp->r_sge.sg_list++; | ||
563 | } | ||
564 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) | 556 | if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) |
565 | return; | 557 | return; |
566 | wc.wr_id = qp->r_wr_id; | 558 | wc.wr_id = qp->r_wr_id; |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 7b6c3bffa9d9..76d7ce8a8c6e 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -183,7 +183,7 @@ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) | |||
183 | sge->sge_length -= len; | 183 | sge->sge_length -= len; |
184 | if (sge->sge_length == 0) { | 184 | if (sge->sge_length == 0) { |
185 | if (release) | 185 | if (release) |
186 | atomic_dec(&sge->mr->refcount); | 186 | qib_put_mr(sge->mr); |
187 | if (--ss->num_sge) | 187 | if (--ss->num_sge) |
188 | *sge = *ss->sg_list++; | 188 | *sge = *ss->sg_list++; |
189 | } else if (sge->length == 0 && sge->mr->lkey) { | 189 | } else if (sge->length == 0 && sge->mr->lkey) { |
@@ -224,7 +224,7 @@ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) | |||
224 | sge->sge_length -= len; | 224 | sge->sge_length -= len; |
225 | if (sge->sge_length == 0) { | 225 | if (sge->sge_length == 0) { |
226 | if (release) | 226 | if (release) |
227 | atomic_dec(&sge->mr->refcount); | 227 | qib_put_mr(sge->mr); |
228 | if (--ss->num_sge) | 228 | if (--ss->num_sge) |
229 | *sge = *ss->sg_list++; | 229 | *sge = *ss->sg_list++; |
230 | } else if (sge->length == 0 && sge->mr->lkey) { | 230 | } else if (sge->length == 0 && sge->mr->lkey) { |
@@ -435,7 +435,7 @@ bail_inval_free: | |||
435 | while (j) { | 435 | while (j) { |
436 | struct qib_sge *sge = &wqe->sg_list[--j]; | 436 | struct qib_sge *sge = &wqe->sg_list[--j]; |
437 | 437 | ||
438 | atomic_dec(&sge->mr->refcount); | 438 | qib_put_mr(sge->mr); |
439 | } | 439 | } |
440 | bail_inval: | 440 | bail_inval: |
441 | ret = -EINVAL; | 441 | ret = -EINVAL; |
@@ -978,7 +978,7 @@ void qib_put_txreq(struct qib_verbs_txreq *tx) | |||
978 | if (atomic_dec_and_test(&qp->refcount)) | 978 | if (atomic_dec_and_test(&qp->refcount)) |
979 | wake_up(&qp->wait); | 979 | wake_up(&qp->wait); |
980 | if (tx->mr) { | 980 | if (tx->mr) { |
981 | atomic_dec(&tx->mr->refcount); | 981 | qib_put_mr(tx->mr); |
982 | tx->mr = NULL; | 982 | tx->mr = NULL; |
983 | } | 983 | } |
984 | if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { | 984 | if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) { |
@@ -1336,7 +1336,7 @@ done: | |||
1336 | } | 1336 | } |
1337 | qib_sendbuf_done(dd, pbufn); | 1337 | qib_sendbuf_done(dd, pbufn); |
1338 | if (qp->s_rdma_mr) { | 1338 | if (qp->s_rdma_mr) { |
1339 | atomic_dec(&qp->s_rdma_mr->refcount); | 1339 | qib_put_mr(qp->s_rdma_mr); |
1340 | qp->s_rdma_mr = NULL; | 1340 | qp->s_rdma_mr = NULL; |
1341 | } | 1341 | } |
1342 | if (qp->s_wqe) { | 1342 | if (qp->s_wqe) { |
diff --git a/drivers/infiniband/hw/qib/qib_verbs.h b/drivers/infiniband/hw/qib/qib_verbs.h
index 487606024659..4a2277bc059e 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.h
+++ b/drivers/infiniband/hw/qib/qib_verbs.h
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/interrupt.h> | 41 | #include <linux/interrupt.h> |
42 | #include <linux/kref.h> | 42 | #include <linux/kref.h> |
43 | #include <linux/workqueue.h> | 43 | #include <linux/workqueue.h> |
44 | #include <linux/completion.h> | ||
44 | #include <rdma/ib_pack.h> | 45 | #include <rdma/ib_pack.h> |
45 | #include <rdma/ib_user_verbs.h> | 46 | #include <rdma/ib_user_verbs.h> |
46 | 47 | ||
@@ -302,6 +303,8 @@ struct qib_mregion { | |||
302 | u32 max_segs; /* number of qib_segs in all the arrays */ | 303 | u32 max_segs; /* number of qib_segs in all the arrays */ |
303 | u32 mapsz; /* size of the map array */ | 304 | u32 mapsz; /* size of the map array */ |
304 | u8 page_shift; /* 0 - non unform/non powerof2 sizes */ | 305 | u8 page_shift; /* 0 - non unform/non powerof2 sizes */ |
306 | u8 lkey_published; /* in global table */ | ||
307 | struct completion comp; /* complete when refcount goes to zero */ | ||
305 | atomic_t refcount; | 308 | atomic_t refcount; |
306 | struct qib_segarray *map[0]; /* the segments */ | 309 | struct qib_segarray *map[0]; /* the segments */ |
307 | }; | 310 | }; |
@@ -944,9 +947,9 @@ int qib_post_ud_send(struct qib_qp *qp, struct ib_send_wr *wr); | |||
944 | void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, | 947 | void qib_ud_rcv(struct qib_ibport *ibp, struct qib_ib_header *hdr, |
945 | int has_grh, void *data, u32 tlen, struct qib_qp *qp); | 948 | int has_grh, void *data, u32 tlen, struct qib_qp *qp); |
946 | 949 | ||
947 | int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr); | 950 | int qib_alloc_lkey(struct qib_mregion *mr, int dma_region); |
948 | 951 | ||
949 | int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr); | 952 | void qib_free_lkey(struct qib_mregion *mr); |
950 | 953 | ||
951 | int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, | 954 | int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd, |
952 | struct qib_sge *isge, struct ib_sge *sge, int acc); | 955 | struct qib_sge *isge, struct ib_sge *sge, int acc); |
@@ -1014,6 +1017,27 @@ int qib_unmap_fmr(struct list_head *fmr_list); | |||
1014 | 1017 | ||
1015 | int qib_dealloc_fmr(struct ib_fmr *ibfmr); | 1018 | int qib_dealloc_fmr(struct ib_fmr *ibfmr); |
1016 | 1019 | ||
1020 | static inline void qib_get_mr(struct qib_mregion *mr) | ||
1021 | { | ||
1022 | atomic_inc(&mr->refcount); | ||
1023 | } | ||
1024 | |||
1025 | static inline void qib_put_mr(struct qib_mregion *mr) | ||
1026 | { | ||
1027 | if (unlikely(atomic_dec_and_test(&mr->refcount))) | ||
1028 | complete(&mr->comp); | ||
1029 | } | ||
1030 | |||
1031 | static inline void qib_put_ss(struct qib_sge_state *ss) | ||
1032 | { | ||
1033 | while (ss->num_sge) { | ||
1034 | qib_put_mr(ss->sge.mr); | ||
1035 | if (--ss->num_sge) | ||
1036 | ss->sge = *ss->sg_list++; | ||
1037 | } | ||
1038 | } | ||
1039 | |||
1040 | |||
1017 | void qib_release_mmap_info(struct kref *ref); | 1041 | void qib_release_mmap_info(struct kref *ref); |
1018 | 1042 | ||
1019 | struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size, | 1043 | struct qib_mmap_info *qib_create_mmap_info(struct qib_ibdev *dev, u32 size, |