author	Bryan O'Sullivan <bos@pathscale.com>	2006-03-29 18:23:36 -0500
committer	Roland Dreier <rolandd@cisco.com>	2006-03-31 16:14:20 -0500
commit	cef1cce5c87d84f76e44f0e7b4de72ab3818ac3a (patch)
tree	c9cb13413cae9dd636a699e3ec7d41882fd8e514 /drivers/infiniband/hw/ipath/ipath_mr.c
parent	97f9efbc47f0b1bc88abac8724b505f0794a48d0 (diff)
IB/ipath: misc infiniband code, part 1
Completion queues, local and remote memory keys, and memory region
support.
Signed-off-by: Bryan O'Sullivan <bos@pathscale.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
Diffstat (limited to 'drivers/infiniband/hw/ipath/ipath_mr.c')
-rw-r--r--	drivers/infiniband/hw/ipath/ipath_mr.c	383
1 file changed, 383 insertions(+), 0 deletions(-)
diff --git a/drivers/infiniband/hw/ipath/ipath_mr.c b/drivers/infiniband/hw/ipath/ipath_mr.c
new file mode 100644
index 000000000000..69ffec66d45d
--- /dev/null
+++ b/drivers/infiniband/hw/ipath/ipath_mr.c
@@ -0,0 +1,383 @@
/*
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/ib_pack.h>
#include <rdma/ib_smi.h>

#include "ipath_verbs.h"
37 | |||
38 | /** | ||
39 | * ipath_get_dma_mr - get a DMA memory region | ||
40 | * @pd: protection domain for this memory region | ||
41 | * @acc: access flags | ||
42 | * | ||
43 | * Returns the memory region on success, otherwise returns an errno. | ||
44 | */ | ||
45 | struct ib_mr *ipath_get_dma_mr(struct ib_pd *pd, int acc) | ||
46 | { | ||
47 | struct ipath_mr *mr; | ||
48 | struct ib_mr *ret; | ||
49 | |||
50 | mr = kzalloc(sizeof *mr, GFP_KERNEL); | ||
51 | if (!mr) { | ||
52 | ret = ERR_PTR(-ENOMEM); | ||
53 | goto bail; | ||
54 | } | ||
55 | |||
56 | mr->mr.access_flags = acc; | ||
57 | ret = &mr->ibmr; | ||
58 | |||
59 | bail: | ||
60 | return ret; | ||
61 | } | ||
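
/*
 * No ipath_alloc_lkey() call is made above: kzalloc() leaves
 * mr->mr.lkey (and hence ibmr->lkey/rkey) at zero, which the rest of
 * the driver (ipath_keys.c, outside this file) appears to treat as the
 * special "DMA MR" case that bypasses the lkey table.
 */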
62 | |||
63 | static struct ipath_mr *alloc_mr(int count, | ||
64 | struct ipath_lkey_table *lk_table) | ||
65 | { | ||
66 | struct ipath_mr *mr; | ||
67 | int m, i = 0; | ||
68 | |||
69 | /* Allocate struct plus pointers to first level page tables. */ | ||
70 | m = (count + IPATH_SEGSZ - 1) / IPATH_SEGSZ; | ||
71 | mr = kmalloc(sizeof *mr + m * sizeof mr->mr.map[0], GFP_KERNEL); | ||
72 | if (!mr) | ||
73 | goto done; | ||
74 | |||
75 | /* Allocate first level page tables. */ | ||
76 | for (; i < m; i++) { | ||
77 | mr->mr.map[i] = kmalloc(sizeof *mr->mr.map[0], GFP_KERNEL); | ||
78 | if (!mr->mr.map[i]) | ||
79 | goto bail; | ||
80 | } | ||
81 | mr->mr.mapsz = m; | ||
82 | |||
83 | /* | ||
84 | * ib_reg_phys_mr() will initialize mr->ibmr except for | ||
85 | * lkey and rkey. | ||
86 | */ | ||
87 | if (!ipath_alloc_lkey(lk_table, &mr->mr)) | ||
88 | goto bail; | ||
89 | mr->ibmr.rkey = mr->ibmr.lkey = mr->mr.lkey; | ||
90 | |||
91 | goto done; | ||
92 | |||
93 | bail: | ||
94 | while (i) { | ||
95 | i--; | ||
96 | kfree(mr->mr.map[i]); | ||
97 | } | ||
98 | kfree(mr); | ||
99 | mr = NULL; | ||
100 | |||
101 | done: | ||
102 | return mr; | ||
103 | } | ||
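
/*
 * The two-level table built by alloc_mr() is indexed throughout this
 * file as mr->mr.map[m]->segs[n]: for a flat segment index i,
 * m = i / IPATH_SEGSZ selects the first-level page table and
 * n = i % IPATH_SEGSZ selects the entry within it.  If IPATH_SEGSZ
 * were 64, for example, segment 130 would live at map[2]->segs[2].
 */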
104 | |||
105 | /** | ||
106 | * ipath_reg_phys_mr - register a physical memory region | ||
107 | * @pd: protection domain for this memory region | ||
108 | * @buffer_list: pointer to the list of physical buffers to register | ||
109 | * @num_phys_buf: the number of physical buffers to register | ||
110 | * @iova_start: the starting address passed over IB which maps to this MR | ||
111 | * | ||
112 | * Returns the memory region on success, otherwise returns an errno. | ||
113 | */ | ||
114 | struct ib_mr *ipath_reg_phys_mr(struct ib_pd *pd, | ||
115 | struct ib_phys_buf *buffer_list, | ||
116 | int num_phys_buf, int acc, u64 *iova_start) | ||
117 | { | ||
118 | struct ipath_mr *mr; | ||
119 | int n, m, i; | ||
120 | struct ib_mr *ret; | ||
121 | |||
122 | mr = alloc_mr(num_phys_buf, &to_idev(pd->device)->lk_table); | ||
123 | if (mr == NULL) { | ||
124 | ret = ERR_PTR(-ENOMEM); | ||
125 | goto bail; | ||
126 | } | ||
127 | |||
128 | mr->mr.user_base = *iova_start; | ||
129 | mr->mr.iova = *iova_start; | ||
130 | mr->mr.length = 0; | ||
131 | mr->mr.offset = 0; | ||
132 | mr->mr.access_flags = acc; | ||
133 | mr->mr.max_segs = num_phys_buf; | ||
134 | |||
135 | m = 0; | ||
136 | n = 0; | ||
137 | for (i = 0; i < num_phys_buf; i++) { | ||
138 | mr->mr.map[m]->segs[n].vaddr = | ||
139 | phys_to_virt(buffer_list[i].addr); | ||
140 | mr->mr.map[m]->segs[n].length = buffer_list[i].size; | ||
141 | mr->mr.length += buffer_list[i].size; | ||
142 | n++; | ||
143 | if (n == IPATH_SEGSZ) { | ||
144 | m++; | ||
145 | n = 0; | ||
146 | } | ||
147 | } | ||
148 | |||
149 | ret = &mr->ibmr; | ||
150 | |||
151 | bail: | ||
152 | return ret; | ||
153 | } | ||
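
/*
 * Note that the phys_to_virt() translation above assumes each buffer
 * lies in the kernel's direct mapping; highmem pages would need a
 * different scheme.
 */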
154 | |||
155 | /** | ||
156 | * ipath_reg_user_mr - register a userspace memory region | ||
157 | * @pd: protection domain for this memory region | ||
158 | * @region: the user memory region | ||
159 | * @mr_access_flags: access flags for this memory region | ||
160 | * @udata: unused by the InfiniPath driver | ||
161 | * | ||
162 | * Returns the memory region on success, otherwise returns an errno. | ||
163 | */ | ||
164 | struct ib_mr *ipath_reg_user_mr(struct ib_pd *pd, struct ib_umem *region, | ||
165 | int mr_access_flags, struct ib_udata *udata) | ||
166 | { | ||
167 | struct ipath_mr *mr; | ||
168 | struct ib_umem_chunk *chunk; | ||
169 | int n, m, i; | ||
170 | struct ib_mr *ret; | ||
171 | |||
172 | n = 0; | ||
173 | list_for_each_entry(chunk, ®ion->chunk_list, list) | ||
174 | n += chunk->nents; | ||
175 | |||
176 | mr = alloc_mr(n, &to_idev(pd->device)->lk_table); | ||
177 | if (!mr) { | ||
178 | ret = ERR_PTR(-ENOMEM); | ||
179 | goto bail; | ||
180 | } | ||
181 | |||
182 | mr->mr.user_base = region->user_base; | ||
183 | mr->mr.iova = region->virt_base; | ||
184 | mr->mr.length = region->length; | ||
185 | mr->mr.offset = region->offset; | ||
186 | mr->mr.access_flags = mr_access_flags; | ||
187 | mr->mr.max_segs = n; | ||
188 | |||
189 | m = 0; | ||
190 | n = 0; | ||
191 | list_for_each_entry(chunk, ®ion->chunk_list, list) { | ||
192 | for (i = 0; i < chunk->nmap; i++) { | ||
193 | mr->mr.map[m]->segs[n].vaddr = | ||
194 | page_address(chunk->page_list[i].page); | ||
195 | mr->mr.map[m]->segs[n].length = region->page_size; | ||
196 | n++; | ||
197 | if (n == IPATH_SEGSZ) { | ||
198 | m++; | ||
199 | n = 0; | ||
200 | } | ||
201 | } | ||
202 | } | ||
203 | ret = &mr->ibmr; | ||
204 | |||
205 | bail: | ||
206 | return ret; | ||
207 | } | ||
208 | |||
209 | /** | ||
210 | * ipath_dereg_mr - unregister and free a memory region | ||
211 | * @ibmr: the memory region to free | ||
212 | * | ||
213 | * Returns 0 on success. | ||
214 | * | ||
215 | * Note that this is called to free MRs created by ipath_get_dma_mr() | ||
216 | * or ipath_reg_user_mr(). | ||
217 | */ | ||
218 | int ipath_dereg_mr(struct ib_mr *ibmr) | ||
219 | { | ||
220 | struct ipath_mr *mr = to_imr(ibmr); | ||
221 | int i; | ||
222 | |||
223 | ipath_free_lkey(&to_idev(ibmr->device)->lk_table, ibmr->lkey); | ||
224 | i = mr->mr.mapsz; | ||
225 | while (i) { | ||
226 | i--; | ||
227 | kfree(mr->mr.map[i]); | ||
228 | } | ||
229 | kfree(mr); | ||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | /** | ||
234 | * ipath_alloc_fmr - allocate a fast memory region | ||
235 | * @pd: the protection domain for this memory region | ||
236 | * @mr_access_flags: access flags for this memory region | ||
237 | * @fmr_attr: fast memory region attributes | ||
238 | * | ||
239 | * Returns the memory region on success, otherwise returns an errno. | ||
240 | */ | ||
241 | struct ib_fmr *ipath_alloc_fmr(struct ib_pd *pd, int mr_access_flags, | ||
242 | struct ib_fmr_attr *fmr_attr) | ||
243 | { | ||
244 | struct ipath_fmr *fmr; | ||
245 | int m, i = 0; | ||
246 | struct ib_fmr *ret; | ||
247 | |||
248 | /* Allocate struct plus pointers to first level page tables. */ | ||
249 | m = (fmr_attr->max_pages + IPATH_SEGSZ - 1) / IPATH_SEGSZ; | ||
250 | fmr = kmalloc(sizeof *fmr + m * sizeof fmr->mr.map[0], GFP_KERNEL); | ||
251 | if (!fmr) | ||
252 | goto bail; | ||
253 | |||
254 | /* Allocate first level page tables. */ | ||
255 | for (; i < m; i++) { | ||
256 | fmr->mr.map[i] = kmalloc(sizeof *fmr->mr.map[0], | ||
257 | GFP_KERNEL); | ||
258 | if (!fmr->mr.map[i]) | ||
259 | goto bail; | ||
260 | } | ||
261 | fmr->mr.mapsz = m; | ||
262 | |||
263 | /* | ||
264 | * ib_alloc_fmr() will initialize fmr->ibfmr except for lkey & | ||
265 | * rkey. | ||
266 | */ | ||
267 | if (!ipath_alloc_lkey(&to_idev(pd->device)->lk_table, &fmr->mr)) | ||
268 | goto bail; | ||
269 | fmr->ibfmr.rkey = fmr->ibfmr.lkey = fmr->mr.lkey; | ||
270 | /* | ||
271 | * Resources are allocated but no valid mapping (RKEY can't be | ||
272 | * used). | ||
273 | */ | ||
274 | fmr->mr.user_base = 0; | ||
275 | fmr->mr.iova = 0; | ||
276 | fmr->mr.length = 0; | ||
277 | fmr->mr.offset = 0; | ||
278 | fmr->mr.access_flags = mr_access_flags; | ||
279 | fmr->mr.max_segs = fmr_attr->max_pages; | ||
280 | fmr->page_shift = fmr_attr->page_shift; | ||
281 | |||
282 | ret = &fmr->ibfmr; | ||
283 | goto done; | ||
284 | |||
285 | bail: | ||
286 | while (i) | ||
287 | kfree(fmr->mr.map[--i]); | ||
288 | kfree(fmr); | ||
289 | ret = ERR_PTR(-ENOMEM); | ||
290 | |||
291 | done: | ||
292 | return ret; | ||
293 | } | ||
294 | |||
295 | /** | ||
296 | * ipath_map_phys_fmr - set up a fast memory region | ||
297 | * @ibmfr: the fast memory region to set up | ||
298 | * @page_list: the list of pages to associate with the fast memory region | ||
299 | * @list_len: the number of pages to associate with the fast memory region | ||
300 | * @iova: the virtual address of the start of the fast memory region | ||
301 | * | ||
302 | * This may be called from interrupt context. | ||
303 | */ | ||
304 | |||
305 | int ipath_map_phys_fmr(struct ib_fmr *ibfmr, u64 * page_list, | ||
306 | int list_len, u64 iova) | ||
307 | { | ||
308 | struct ipath_fmr *fmr = to_ifmr(ibfmr); | ||
309 | struct ipath_lkey_table *rkt; | ||
310 | unsigned long flags; | ||
311 | int m, n, i; | ||
312 | u32 ps; | ||
313 | int ret; | ||
314 | |||
315 | if (list_len > fmr->mr.max_segs) { | ||
316 | ret = -EINVAL; | ||
317 | goto bail; | ||
318 | } | ||
319 | rkt = &to_idev(ibfmr->device)->lk_table; | ||
320 | spin_lock_irqsave(&rkt->lock, flags); | ||
321 | fmr->mr.user_base = iova; | ||
322 | fmr->mr.iova = iova; | ||
323 | ps = 1 << fmr->page_shift; | ||
324 | fmr->mr.length = list_len * ps; | ||
325 | m = 0; | ||
326 | n = 0; | ||
327 | ps = 1 << fmr->page_shift; | ||
328 | for (i = 0; i < list_len; i++) { | ||
329 | fmr->mr.map[m]->segs[n].vaddr = phys_to_virt(page_list[i]); | ||
330 | fmr->mr.map[m]->segs[n].length = ps; | ||
331 | if (++n == IPATH_SEGSZ) { | ||
332 | m++; | ||
333 | n = 0; | ||
334 | } | ||
335 | } | ||
336 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
337 | ret = 0; | ||
338 | |||
339 | bail: | ||
340 | return ret; | ||
341 | } | ||
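
/*
 * As the kernel-doc above notes, ipath_map_phys_fmr() may run in
 * interrupt context: it performs no allocations and guards the update
 * with spin_lock_irqsave() on the lkey-table lock.
 */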
342 | |||
343 | /** | ||
344 | * ipath_unmap_fmr - unmap fast memory regions | ||
345 | * @fmr_list: the list of fast memory regions to unmap | ||
346 | * | ||
347 | * Returns 0 on success. | ||
348 | */ | ||
349 | int ipath_unmap_fmr(struct list_head *fmr_list) | ||
350 | { | ||
351 | struct ipath_fmr *fmr; | ||
352 | struct ipath_lkey_table *rkt; | ||
353 | unsigned long flags; | ||
354 | |||
355 | list_for_each_entry(fmr, fmr_list, ibfmr.list) { | ||
356 | rkt = &to_idev(fmr->ibfmr.device)->lk_table; | ||
357 | spin_lock_irqsave(&rkt->lock, flags); | ||
358 | fmr->mr.user_base = 0; | ||
359 | fmr->mr.iova = 0; | ||
360 | fmr->mr.length = 0; | ||
361 | spin_unlock_irqrestore(&rkt->lock, flags); | ||
362 | } | ||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | /** | ||
367 | * ipath_dealloc_fmr - deallocate a fast memory region | ||
368 | * @ibfmr: the fast memory region to deallocate | ||
369 | * | ||
370 | * Returns 0 on success. | ||
371 | */ | ||
372 | int ipath_dealloc_fmr(struct ib_fmr *ibfmr) | ||
373 | { | ||
374 | struct ipath_fmr *fmr = to_ifmr(ibfmr); | ||
375 | int i; | ||
376 | |||
377 | ipath_free_lkey(&to_idev(ibfmr->device)->lk_table, ibfmr->lkey); | ||
378 | i = fmr->mr.mapsz; | ||
379 | while (i) | ||
380 | kfree(fmr->mr.map[--i]); | ||
381 | kfree(fmr); | ||
382 | return 0; | ||
383 | } | ||
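
For context, a minimal sketch of how a kernel consumer of this era might reach ipath_reg_phys_mr() and ipath_dereg_mr() through the generic verbs wrappers ib_reg_phys_mr() and ib_dereg_mr(). The function below is hypothetical and not part of this patch; the protection domain is assumed to have been allocated by the caller with ib_alloc_pd(), and the buffer is assumed to be a kmalloc()-style allocation in the kernel's direct mapping.

/* Hypothetical caller, for illustration only. */
static int example_register_mr(struct ib_pd *pd, void *buf, size_t len)
{
	struct ib_phys_buf pbuf = {
		.addr = virt_to_phys(buf),	/* physical address of buffer */
		.size = len,
	};
	u64 iova = (u64) (unsigned long) buf;	/* IOVA seen on the wire */
	struct ib_mr *mr;

	/* Dispatches to ipath_reg_phys_mr() on an ipath device. */
	mr = ib_reg_phys_mr(pd, &pbuf, 1, IB_ACCESS_LOCAL_WRITE, &iova);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	/* ... post work requests using mr->lkey / mr->rkey ... */

	/* Dispatches to ipath_dereg_mr(). */
	return ib_dereg_mr(mr);
}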