author		Mike Marciniszyn <mike.marciniszyn@qlogic.org>	2011-01-10 20:42:23 -0500
committer	Roland Dreier <rolandd@cisco.com>	2011-01-10 20:42:23 -0500
commit		4db62d4786e946e6fc8c2bb1f9201508f7f46c41 (patch)
tree		dbd1bf62fa05e53d225f54d24e6db28eab7bb6cc /drivers/infiniband
parent		f2d255a0787119f7f4dc0e6093a0bd2700a49402 (diff)
IB/qib: Fix refcount leak in lkey/rkey validation
The mr optimization introduced a reference count leak on an exception
test.  The lock/refcount manipulation is moved down and the problematic
exception test now calls bail to ensure that the lock is released.

Additional fixes as suggested by Ralph Campbell <ralph.campbell@qlogic.org>:

- reduce lock scope of dma regions
- use explicit values on returns vs. automatic ret value

Signed-off-by: Mike Marciniszyn <mike.marciniszyn@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband')
-rw-r--r--	drivers/infiniband/hw/qib/qib_keys.c | 30
1 file changed, 14 insertions(+), 16 deletions(-)
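
The pattern the patch establishes can be shown outside the kernel. Below is a
minimal userspace sketch, not the kernel code: the names lookup_mr, table_lock,
and struct mregion are hypothetical, and a pthread spinlock with C11 atomics
stands in for spin_lock_irqsave()/atomic_inc(). The point it illustrates is the
reordering applied in the diff that follows: the reference is taken only after
every validation check has passed, and every failure path goes through a single
bail label that releases the lock.

/*
 * Hedged sketch of the corrected ordering; build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct mregion {
	atomic_int refcount;
	size_t user_base;	/* start of the registered range */
	size_t length;		/* bytes covered by the region */
};

static pthread_spinlock_t table_lock;

/*
 * Validate [addr, addr + len) against mr.  On success, return 1 with a
 * reference held; on any failure, return 0 through "bail" so the lock
 * is always released and no reference is leaked.
 */
static int lookup_mr(struct mregion *mr, size_t addr, size_t len)
{
	size_t off;

	pthread_spin_lock(&table_lock);
	if (mr == NULL)
		goto bail;

	off = addr - mr->user_base;
	if (addr < mr->user_base || off + len > mr->length)
		goto bail;	/* the buggy ordering returned here with
				 * the lock held and the ref already taken */

	/* Take the reference only after all checks pass, then unlock. */
	atomic_fetch_add(&mr->refcount, 1);
	pthread_spin_unlock(&table_lock);
	return 1;

bail:
	pthread_spin_unlock(&table_lock);
	return 0;
}

int main(void)
{
	struct mregion mr = { 0, 0x1000, 0x100 };

	pthread_spin_init(&table_lock, PTHREAD_PROCESS_PRIVATE);
	printf("in range: %d\n", lookup_mr(&mr, 0x1010, 0x20)); /* 1 */
	printf("overflow: %d\n", lookup_mr(&mr, 0x10f0, 0x20)); /* 0 */
	printf("refcount: %d\n", atomic_load(&mr.refcount));    /* 1 */
	return 0;
}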
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 756d16098e73..8fd19a47df0c 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -152,27 +151,28 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		isge->mr = dev->dma_mr;
 		isge->vaddr = (void *) sge->addr;
 		isge->length = sge->length;
 		isge->sge_length = sge->length;
 		isge->m = 0;
 		isge->n = 0;
-		spin_unlock_irqrestore(&rkt->lock, flags);
 		goto ok;
 	}
 	mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
 	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
 		     mr->pd != &pd->ibpd))
 		goto bail;
-	atomic_inc(&mr->refcount);
-	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off = sge->addr - mr->user_base;
 	if (unlikely(sge->addr < mr->user_base ||
 		     off + sge->length > mr->length ||
 		     (mr->access_flags & acc) != acc))
-		return ret;
+		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
 	if (mr->page_shift) {
@@ -206,11 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 	isge->m = m;
 	isge->n = n;
 ok:
-	ret = 1;
-	return ret;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /**
@@ -231,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -248,26 +246,27 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		sge->mr = dev->dma_mr;
 		sge->vaddr = (void *) vaddr;
 		sge->length = len;
 		sge->sge_length = len;
 		sge->m = 0;
 		sge->n = 0;
-		spin_unlock_irqrestore(&rkt->lock, flags);
 		goto ok;
 	}
 
 	mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))];
 	if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd))
 		goto bail;
-	atomic_inc(&mr->refcount);
-	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off = vaddr - mr->iova;
 	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
 		     (mr->access_flags & acc) == 0))
-		return ret;
+		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
 	if (mr->page_shift) {
@@ -301,11 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	sge->m = m;
 	sge->n = n;
 ok:
-	ret = 1;
-	return ret;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /*