diff options
Diffstat (limited to 'drivers/infiniband/hw/qib/qib_keys.c')
-rw-r--r-- | drivers/infiniband/hw/qib/qib_keys.c | 328 |
1 file changed, 328 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/qib/qib_keys.c b/drivers/infiniband/hw/qib/qib_keys.c new file mode 100644 index 000000000000..4b80eb153d57 --- /dev/null +++ b/drivers/infiniband/hw/qib/qib_keys.c | |||
@@ -0,0 +1,328 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2006, 2007, 2009 QLogic Corporation. All rights reserved. | ||
3 | * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. | ||
4 | * | ||
5 | * This software is available to you under a choice of one of two | ||
6 | * licenses. You may choose to be licensed under the terms of the GNU | ||
7 | * General Public License (GPL) Version 2, available from the file | ||
8 | * COPYING in the main directory of this source tree, or the | ||
9 | * OpenIB.org BSD license below: | ||
10 | * | ||
11 | * Redistribution and use in source and binary forms, with or | ||
12 | * without modification, are permitted provided that the following | ||
13 | * conditions are met: | ||
14 | * | ||
15 | * - Redistributions of source code must retain the above | ||
16 | * copyright notice, this list of conditions and the following | ||
17 | * disclaimer. | ||
18 | * | ||
19 | * - Redistributions in binary form must reproduce the above | ||
20 | * copyright notice, this list of conditions and the following | ||
21 | * disclaimer in the documentation and/or other materials | ||
22 | * provided with the distribution. | ||
23 | * | ||
24 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
25 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
26 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
27 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
28 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
29 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
30 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
31 | * SOFTWARE. | ||
32 | */ | ||
33 | |||
34 | #include "qib.h" | ||
35 | |||
36 | /** | ||
37 | * qib_alloc_lkey - allocate an lkey | ||
38 | * @rkt: lkey table in which to allocate the lkey | ||
39 | * @mr: memory region that this lkey protects | ||
40 | * | ||
41 | * Returns 1 if successful, otherwise returns 0. | ||
42 | */ | ||
43 | |||
44 | int qib_alloc_lkey(struct qib_lkey_table *rkt, struct qib_mregion *mr) | ||
45 | { | ||
46 | unsigned long flags; | ||
47 | u32 r; | ||
48 | u32 n; | ||
49 | int ret; | ||
50 | |||
51 | spin_lock_irqsave(&rkt->lock, flags); | ||
52 | |||
53 | /* Find the next available LKEY */ | ||
54 | r = rkt->next; | ||
55 | n = r; | ||
56 | for (;;) { | ||
57 | if (rkt->table[r] == NULL) | ||
58 | break; | ||
59 | r = (r + 1) & (rkt->max - 1); | ||
60 | if (r == n) { | ||
61 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
62 | ret = 0; | ||
63 | goto bail; | ||
64 | } | ||
65 | } | ||
66 | rkt->next = (r + 1) & (rkt->max - 1); | ||
67 | /* | ||
68 | * Make sure lkey is never zero which is reserved to indicate an | ||
69 | * unrestricted LKEY. | ||
70 | */ | ||
71 | rkt->gen++; | ||
72 | mr->lkey = (r << (32 - ib_qib_lkey_table_size)) | | ||
73 | ((((1 << (24 - ib_qib_lkey_table_size)) - 1) & rkt->gen) | ||
74 | << 8); | ||
75 | if (mr->lkey == 0) { | ||
76 | mr->lkey |= 1 << 8; | ||
77 | rkt->gen++; | ||
78 | } | ||
79 | rkt->table[r] = mr; | ||
80 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
81 | |||
82 | ret = 1; | ||
83 | |||
84 | bail: | ||
85 | return ret; | ||
86 | } | ||
87 | |||
/**
 * qib_free_lkey - free an lkey
 * @dev: ibdev whose lkey table (or dma_mr slot) holds the region
 * @mr: memory region whose lkey (mr->lkey) is to be released
 *
 * Returns 0 on success, or -EBUSY if the region still has outstanding
 * references and therefore cannot be removed yet.
 */
int qib_free_lkey(struct qib_ibdev *dev, struct qib_mregion *mr)
{
	unsigned long flags;
	u32 lkey = mr->lkey;
	u32 r;
	int ret;

	spin_lock_irqsave(&dev->lk_table.lock, flags);
	if (lkey == 0) {
		/* lkey 0 is the special unrestricted/DMA region. */
		if (dev->dma_mr && dev->dma_mr == mr) {
			/* Only detach if no one still holds a reference. */
			ret = atomic_read(&dev->dma_mr->refcount);
			if (!ret)
				dev->dma_mr = NULL;
		} else
			ret = 0;
	} else {
		/* Table index lives in the high bits of the lkey. */
		r = lkey >> (32 - ib_qib_lkey_table_size);
		/* NOTE(review): table[r] is dereferenced without a NULL
		 * check — presumably callers guarantee the entry is still
		 * populated; confirm against call sites. */
		ret = atomic_read(&dev->lk_table.table[r]->refcount);
		if (!ret)
			dev->lk_table.table[r] = NULL;
	}
	spin_unlock_irqrestore(&dev->lk_table.lock, flags);

	/* A non-zero refcount means the region is busy. */
	if (ret)
		ret = -EBUSY;
	return ret;
}
120 | |||
/**
 * qib_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain the SGE's lkey must belong to
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags required of the memory region
 *
 * Return 1 if valid and successful, otherwise returns 0.
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.  On success a reference is taken on the memory region
 * (via atomic_inc of mr->refcount); the caller owns that reference.
 */
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc)
{
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;
	int ret = 0;
	unsigned long flags;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	spin_lock_irqsave(&rkt->lock, flags);
	if (sge->lkey == 0) {
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		/* The unrestricted LKEY is never valid for user PDs. */
		if (pd->user)
			goto bail;
		if (!dev->dma_mr)
			goto bail;
		atomic_inc(&dev->dma_mr->refcount);
		isge->mr = dev->dma_mr;
		isge->vaddr = (void *) sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	/* High bits of the lkey index the table (see qib_alloc_lkey). */
	mr = rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))];
	if (unlikely(mr == NULL || mr->lkey != sge->lkey ||
		     mr->pd != &pd->ibpd))
		goto bail;

	/*
	 * off may wrap if addr < user_base; the explicit addr < user_base
	 * test below rejects that case.  All requested access bits must
	 * be present in the region's access flags.
	 */
	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;

	/* Walk the two-level segment map to the segment containing off. */
	off += mr->offset;
	m = 0;
	n = 0;
	while (off >= mr->map[m]->segs[n].length) {
		off -= mr->map[m]->segs[n].length;
		n++;
		if (n >= QIB_SEGSZ) {
			m++;
			n = 0;
		}
	}
	atomic_inc(&mr->refcount);
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	ret = 1;
bail:
	spin_unlock_irqrestore(&rkt->lock, flags);
	return ret;
}
198 | |||
199 | /** | ||
200 | * qib_rkey_ok - check the IB virtual address, length, and RKEY | ||
201 | * @dev: infiniband device | ||
202 | * @ss: SGE state | ||
203 | * @len: length of data | ||
204 | * @vaddr: virtual address to place data | ||
205 | * @rkey: rkey to check | ||
206 | * @acc: access flags | ||
207 | * | ||
208 | * Return 1 if successful, otherwise 0. | ||
209 | */ | ||
210 | int qib_rkey_ok(struct qib_qp *qp, struct qib_sge *sge, | ||
211 | u32 len, u64 vaddr, u32 rkey, int acc) | ||
212 | { | ||
213 | struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; | ||
214 | struct qib_mregion *mr; | ||
215 | unsigned n, m; | ||
216 | size_t off; | ||
217 | int ret = 0; | ||
218 | unsigned long flags; | ||
219 | |||
220 | /* | ||
221 | * We use RKEY == zero for kernel virtual addresses | ||
222 | * (see qib_get_dma_mr and qib_dma.c). | ||
223 | */ | ||
224 | spin_lock_irqsave(&rkt->lock, flags); | ||
225 | if (rkey == 0) { | ||
226 | struct qib_pd *pd = to_ipd(qp->ibqp.pd); | ||
227 | struct qib_ibdev *dev = to_idev(pd->ibpd.device); | ||
228 | |||
229 | if (pd->user) | ||
230 | goto bail; | ||
231 | if (!dev->dma_mr) | ||
232 | goto bail; | ||
233 | atomic_inc(&dev->dma_mr->refcount); | ||
234 | sge->mr = dev->dma_mr; | ||
235 | sge->vaddr = (void *) vaddr; | ||
236 | sge->length = len; | ||
237 | sge->sge_length = len; | ||
238 | sge->m = 0; | ||
239 | sge->n = 0; | ||
240 | goto ok; | ||
241 | } | ||
242 | |||
243 | mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]; | ||
244 | if (unlikely(mr == NULL || mr->lkey != rkey || qp->ibqp.pd != mr->pd)) | ||
245 | goto bail; | ||
246 | |||
247 | off = vaddr - mr->iova; | ||
248 | if (unlikely(vaddr < mr->iova || off + len > mr->length || | ||
249 | (mr->access_flags & acc) == 0)) | ||
250 | goto bail; | ||
251 | |||
252 | off += mr->offset; | ||
253 | m = 0; | ||
254 | n = 0; | ||
255 | while (off >= mr->map[m]->segs[n].length) { | ||
256 | off -= mr->map[m]->segs[n].length; | ||
257 | n++; | ||
258 | if (n >= QIB_SEGSZ) { | ||
259 | m++; | ||
260 | n = 0; | ||
261 | } | ||
262 | } | ||
263 | atomic_inc(&mr->refcount); | ||
264 | sge->mr = mr; | ||
265 | sge->vaddr = mr->map[m]->segs[n].vaddr + off; | ||
266 | sge->length = mr->map[m]->segs[n].length - off; | ||
267 | sge->sge_length = len; | ||
268 | sge->m = m; | ||
269 | sge->n = n; | ||
270 | ok: | ||
271 | ret = 1; | ||
272 | bail: | ||
273 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
274 | return ret; | ||
275 | } | ||
276 | |||
277 | /* | ||
278 | * Initialize the memory region specified by the work reqeust. | ||
279 | */ | ||
280 | int qib_fast_reg_mr(struct qib_qp *qp, struct ib_send_wr *wr) | ||
281 | { | ||
282 | struct qib_lkey_table *rkt = &to_idev(qp->ibqp.device)->lk_table; | ||
283 | struct qib_pd *pd = to_ipd(qp->ibqp.pd); | ||
284 | struct qib_mregion *mr; | ||
285 | u32 rkey = wr->wr.fast_reg.rkey; | ||
286 | unsigned i, n, m; | ||
287 | int ret = -EINVAL; | ||
288 | unsigned long flags; | ||
289 | u64 *page_list; | ||
290 | size_t ps; | ||
291 | |||
292 | spin_lock_irqsave(&rkt->lock, flags); | ||
293 | if (pd->user || rkey == 0) | ||
294 | goto bail; | ||
295 | |||
296 | mr = rkt->table[(rkey >> (32 - ib_qib_lkey_table_size))]; | ||
297 | if (unlikely(mr == NULL || qp->ibqp.pd != mr->pd)) | ||
298 | goto bail; | ||
299 | |||
300 | if (wr->wr.fast_reg.page_list_len > mr->max_segs) | ||
301 | goto bail; | ||
302 | |||
303 | ps = 1UL << wr->wr.fast_reg.page_shift; | ||
304 | if (wr->wr.fast_reg.length > ps * wr->wr.fast_reg.page_list_len) | ||
305 | goto bail; | ||
306 | |||
307 | mr->user_base = wr->wr.fast_reg.iova_start; | ||
308 | mr->iova = wr->wr.fast_reg.iova_start; | ||
309 | mr->lkey = rkey; | ||
310 | mr->length = wr->wr.fast_reg.length; | ||
311 | mr->access_flags = wr->wr.fast_reg.access_flags; | ||
312 | page_list = wr->wr.fast_reg.page_list->page_list; | ||
313 | m = 0; | ||
314 | n = 0; | ||
315 | for (i = 0; i < wr->wr.fast_reg.page_list_len; i++) { | ||
316 | mr->map[m]->segs[n].vaddr = (void *) page_list[i]; | ||
317 | mr->map[m]->segs[n].length = ps; | ||
318 | if (++n == QIB_SEGSZ) { | ||
319 | m++; | ||
320 | n = 0; | ||
321 | } | ||
322 | } | ||
323 | |||
324 | ret = 0; | ||
325 | bail: | ||
326 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
327 | return ret; | ||
328 | } | ||