Diffstat (limited to 'drivers/infiniband/hw/qib/qib_keys.c')
-rw-r--r--	drivers/infiniband/hw/qib/qib_keys.c	80
1 file changed, 56 insertions(+), 24 deletions(-)
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c
index 4b80eb153d57..8fd19a47df0c 100644
--- a/drivers/infiniband/hw/qib/qib_keys.c
+++ b/drivers/infiniband/hw/qib/qib_keys.c
@@ -136,7 +136,6 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -152,6 +151,8 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		isge->mr = dev->dma_mr;
 		isge->vaddr = (void *) sge->addr;
 		isge->length = sge->length;
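Both functions in this file get the same treatment: the reference count is taken while rkt->lock is still held, and the lock is released immediately afterwards, so the copy-out into the SGE runs outside the spinlock with only the refcount pinning the MR. Below is a minimal userspace sketch of that ordering, using a pthread mutex and a C11 atomic in place of the kernel spinlock and atomic_t; struct mreg and lookup_and_pin() are hypothetical stand-ins, not driver API.

/* Userspace analog of the lock/refcount ordering above; names are illustrative. */
#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct mreg {                                  /* stand-in for qib_mregion */
	atomic_int refcount;
	size_t length;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct mreg entry = { 0, 4096 };
static struct mreg *slot = &entry;             /* NULL once unregistered */

static int lookup_and_pin(struct mreg **out)
{
	struct mreg *mr;

	pthread_mutex_lock(&table_lock);
	mr = slot;                             /* lookup under the lock */
	if (!mr) {
		pthread_mutex_unlock(&table_lock);
		return 0;                      /* bail: unlock and fail */
	}
	atomic_fetch_add(&mr->refcount, 1);    /* pin while still locked */
	pthread_mutex_unlock(&table_lock);     /* drop the lock early */

	*out = mr;                             /* safe: the ref keeps mr alive */
	return 1;
}

int main(void)
{
	struct mreg *mr;

	if (lookup_and_pin(&mr))
		printf("pinned, length=%zu refs=%d\n",
		       mr->length, atomic_load(&mr->refcount));
	return 0;
}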
@@ -170,19 +171,34 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 		     off + sge->length > mr->length ||
 		     (mr->access_flags & acc) != acc))
 		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= QIB_SEGSZ) {
-			m++;
-			n = 0;
+	if (mr->page_shift) {
+		/*
+		page sizes are uniform power of 2 so no loop is necessary
+		entries_spanned_by_off is the number of times the loop below
+		would have executed.
+		*/
+		size_t entries_spanned_by_off;
+
+		entries_spanned_by_off = off >> mr->page_shift;
+		off -= (entries_spanned_by_off << mr->page_shift);
+		m = entries_spanned_by_off/QIB_SEGSZ;
+		n = entries_spanned_by_off%QIB_SEGSZ;
+	} else {
+		m = 0;
+		n = 0;
+		while (off >= mr->map[m]->segs[n].length) {
+			off -= mr->map[m]->segs[n].length;
+			n++;
+			if (n >= QIB_SEGSZ) {
+				m++;
+				n = 0;
+			}
 		}
 	}
-	atomic_inc(&mr->refcount);
 	isge->mr = mr;
 	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
 	isge->length = mr->map[m]->segs[n].length - off;
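The fast path added here (and mirrored in qib_rkey_ok below) depends on every segment being the same power-of-2 size: the iteration count of the old loop collapses to off >> page_shift, and the map and segment indices then fall out of one divide and one modulo by QIB_SEGSZ. A standalone check of that equivalence against the old loop follows; the SEGSZ and PAGE_SHIFT values are stand-ins, not the driver's real configuration.

/* Check: shift/divide fast path == old segment walk, for uniform
 * power-of-2 segments. The constants below are stand-ins. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define SEGSZ      247   /* entries per map block; stand-in for QIB_SEGSZ */
#define PAGE_SHIFT 12    /* uniform 4 KiB segments, assumed */

int main(void)
{
	const size_t seg_len = (size_t)1 << PAGE_SHIFT;

	for (size_t start = 0; start < 5000000; start += 12345) {
		/* old path: walk segment by segment */
		size_t off = start, m = 0, n = 0;

		while (off >= seg_len) {
			off -= seg_len;
			n++;
			if (n >= SEGSZ) {
				m++;
				n = 0;
			}
		}

		/* new path: one shift, then divide/modulo */
		size_t spanned = start >> PAGE_SHIFT;
		size_t off2 = start - (spanned << PAGE_SHIFT);
		size_t m2 = spanned / SEGSZ;
		size_t n2 = spanned % SEGSZ;

		assert(m == m2 && n == n2 && off == off2);
	}
	printf("fast path matches the loop\n");
	return 0;
}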
@@ -190,10 +206,10 @@ int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
 	isge->m = m;
 	isge->n = n;
 ok:
-	ret = 1;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /**
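The local ret could be dropped because the two exits no longer share an unlock: the success paths have already released rkt->lock by the time they return 1, while bail: still owns the lock and must release it before returning 0. A stubbed, runnable skeleton of that control flow; lock(), unlock(), and validate() are illustrative, not driver code.

/* Runnable skeleton of the new return discipline; everything here is a stub. */
#include <stdio.h>

static int locked;

static void lock(void)   { locked = 1; }
static void unlock(void) { locked = 0; }

static int validate(int key_ok)
{
	lock();
	if (!key_ok)
		goto bail;
	unlock();          /* success: lock dropped before returning 1 */
	return 1;
bail:
	unlock();          /* failure: unlock on the way out */
	return 0;
}

int main(void)
{
	int good = validate(1);
	int bad = validate(0);

	printf("good=%d bad=%d lock_held=%d\n", good, bad, locked);
	return 0;
}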
@@ -214,7 +230,6 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	struct qib_mregion *mr;
 	unsigned n, m;
 	size_t off;
-	int ret = 0;
 	unsigned long flags;
 
 	/*
@@ -231,6 +246,8 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 		if (!dev->dma_mr)
 			goto bail;
 		atomic_inc(&dev->dma_mr->refcount);
+		spin_unlock_irqrestore(&rkt->lock, flags);
+
 		sge->mr = dev->dma_mr;
 		sge->vaddr = (void *) vaddr;
 		sge->length = len;
@@ -248,19 +265,34 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	if (unlikely(vaddr < mr->iova || off + len > mr->length ||
 		     (mr->access_flags & acc) == 0))
 		goto bail;
+	atomic_inc(&mr->refcount);
+	spin_unlock_irqrestore(&rkt->lock, flags);
 
 	off += mr->offset;
-	m = 0;
-	n = 0;
-	while (off >= mr->map[m]->segs[n].length) {
-		off -= mr->map[m]->segs[n].length;
-		n++;
-		if (n >= QIB_SEGSZ) {
-			m++;
-			n = 0;
+	if (mr->page_shift) {
+		/*
+		page sizes are uniform power of 2 so no loop is necessary
+		entries_spanned_by_off is the number of times the loop below
+		would have executed.
+		*/
+		size_t entries_spanned_by_off;
+
+		entries_spanned_by_off = off >> mr->page_shift;
+		off -= (entries_spanned_by_off << mr->page_shift);
+		m = entries_spanned_by_off/QIB_SEGSZ;
+		n = entries_spanned_by_off%QIB_SEGSZ;
+	} else {
+		m = 0;
+		n = 0;
+		while (off >= mr->map[m]->segs[n].length) {
+			off -= mr->map[m]->segs[n].length;
+			n++;
+			if (n >= QIB_SEGSZ) {
+				m++;
+				n = 0;
+			}
 		}
 	}
-	atomic_inc(&mr->refcount);
 	sge->mr = mr;
 	sge->vaddr = mr->map[m]->segs[n].vaddr + off;
 	sge->length = mr->map[m]->segs[n].length - off;
@@ -268,10 +300,10 @@ int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge,
 	sge->m = m;
 	sge->n = n;
 ok:
-	ret = 1;
+	return 1;
 bail:
 	spin_unlock_irqrestore(&rkt->lock, flags);
-	return ret;
+	return 0;
 }
 
 /*