Diffstat (limited to 'drivers/infiniband/hw/mthca/mthca_memfree.c')
-rw-r--r--   drivers/infiniband/hw/mthca/mthca_memfree.c   465
1 file changed, 465 insertions, 0 deletions
diff --git a/drivers/infiniband/hw/mthca/mthca_memfree.c b/drivers/infiniband/hw/mthca/mthca_memfree.c
new file mode 100644
index 000000000000..7730b5960616
--- /dev/null
+++ b/drivers/infiniband/hw/mthca/mthca_memfree.c
@@ -0,0 +1,465 @@
/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id$
 */

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
        MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
        MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm)
{
        struct mthca_icm_chunk *chunk, *tmp;
        int i;

        if (!icm)
                return;

        list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
                if (chunk->nsg > 0)
                        pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
                                     PCI_DMA_BIDIRECTIONAL);

                for (i = 0; i < chunk->npages; ++i)
                        __free_pages(chunk->mem[i].page,
                                     get_order(chunk->mem[i].length));

                kfree(chunk);
        }

        kfree(icm);
}

struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
                                  unsigned int gfp_mask)
{
        struct mthca_icm *icm;
        struct mthca_icm_chunk *chunk = NULL;
        int cur_order;

        icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
        if (!icm)
                return icm;

        icm->refcount = 0;
        INIT_LIST_HEAD(&icm->chunk_list);

        cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

        while (npages > 0) {
                if (!chunk) {
                        chunk = kmalloc(sizeof *chunk,
                                        gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
                        if (!chunk)
                                goto fail;

                        chunk->npages = 0;
                        chunk->nsg    = 0;
                        list_add_tail(&chunk->list, &icm->chunk_list);
                }

                while (1 << cur_order > npages)
                        --cur_order;

                chunk->mem[chunk->npages].page = alloc_pages(gfp_mask, cur_order);
                if (chunk->mem[chunk->npages].page) {
                        chunk->mem[chunk->npages].length = PAGE_SIZE << cur_order;
                        chunk->mem[chunk->npages].offset = 0;

                        if (++chunk->npages == MTHCA_ICM_CHUNK_LEN) {
                                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                                        chunk->npages,
                                                        PCI_DMA_BIDIRECTIONAL);

                                if (chunk->nsg <= 0)
                                        goto fail;

                                chunk = NULL;
                        }

                        npages -= 1 << cur_order;
                } else {
                        --cur_order;
                        if (cur_order < 0)
                                goto fail;
                }
        }

        if (chunk) {
                chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
                                        chunk->npages,
                                        PCI_DMA_BIDIRECTIONAL);

                if (chunk->nsg <= 0)
                        goto fail;
        }

        return icm;

fail:
        mthca_free_icm(dev, icm);
        return NULL;
}
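
/*
 * Minimal usage sketch, kept under #if 0 so it is never compiled: the
 * caller below is hypothetical and not taken from this file. It simply
 * shows the expected alloc/free pairing for an ICM area big enough to
 * back one table chunk.
 */
#if 0
static int example_icm_round_trip(struct mthca_dev *dev)
{
        struct mthca_icm *icm;

        icm = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
                              GFP_KERNEL | __GFP_NOWARN);
        if (!icm)
                return -ENOMEM;

        /* ... hand the area to the HCA, e.g. via mthca_MAP_ICM() ... */

        mthca_free_icm(dev, icm);
        return 0;
}
#endif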
141 | |||
142 | int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) | ||
143 | { | ||
144 | int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; | ||
145 | int ret = 0; | ||
146 | u8 status; | ||
147 | |||
148 | down(&table->mutex); | ||
149 | |||
150 | if (table->icm[i]) { | ||
151 | ++table->icm[i]->refcount; | ||
152 | goto out; | ||
153 | } | ||
154 | |||
155 | table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, | ||
156 | (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) | | ||
157 | __GFP_NOWARN); | ||
158 | if (!table->icm[i]) { | ||
159 | ret = -ENOMEM; | ||
160 | goto out; | ||
161 | } | ||
162 | |||
163 | if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE, | ||
164 | &status) || status) { | ||
165 | mthca_free_icm(dev, table->icm[i]); | ||
166 | table->icm[i] = NULL; | ||
167 | ret = -ENOMEM; | ||
168 | goto out; | ||
169 | } | ||
170 | |||
171 | ++table->icm[i]->refcount; | ||
172 | |||
173 | out: | ||
174 | up(&table->mutex); | ||
175 | return ret; | ||
176 | } | ||
177 | |||
178 | void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) | ||
179 | { | ||
180 | int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; | ||
181 | u8 status; | ||
182 | |||
183 | down(&table->mutex); | ||
184 | |||
185 | if (--table->icm[i]->refcount == 0) { | ||
186 | mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, | ||
187 | MTHCA_TABLE_CHUNK_SIZE >> 12, &status); | ||
188 | mthca_free_icm(dev, table->icm[i]); | ||
189 | table->icm[i] = NULL; | ||
190 | } | ||
191 | |||
192 | up(&table->mutex); | ||
193 | } | ||
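
/*
 * Minimal sketch of the get/put pairing (hypothetical caller; "qpn" is
 * just an example object number, not anything defined here): pin the
 * ICM chunk backing an object before the HCA uses it, and drop the
 * reference when the object goes away.
 */
#if 0
static int example_table_ref(struct mthca_dev *dev,
                             struct mthca_icm_table *table, int qpn)
{
        int err;

        err = mthca_table_get(dev, table, qpn);
        if (err)
                return err;

        /* ... the context for object qpn is now backed by mapped ICM ... */

        mthca_table_put(dev, table, qpn);
        return 0;
}
#endif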
194 | |||
195 | struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, | ||
196 | u64 virt, int obj_size, | ||
197 | int nobj, int reserved, | ||
198 | int use_lowmem) | ||
199 | { | ||
200 | struct mthca_icm_table *table; | ||
201 | int num_icm; | ||
202 | int i; | ||
203 | u8 status; | ||
204 | |||
205 | num_icm = obj_size * nobj / MTHCA_TABLE_CHUNK_SIZE; | ||
206 | |||
207 | table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL); | ||
208 | if (!table) | ||
209 | return NULL; | ||
210 | |||
211 | table->virt = virt; | ||
212 | table->num_icm = num_icm; | ||
213 | table->num_obj = nobj; | ||
214 | table->obj_size = obj_size; | ||
215 | table->lowmem = use_lowmem; | ||
216 | init_MUTEX(&table->mutex); | ||
217 | |||
218 | for (i = 0; i < num_icm; ++i) | ||
219 | table->icm[i] = NULL; | ||
220 | |||
221 | for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { | ||
222 | table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, | ||
223 | (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) | | ||
224 | __GFP_NOWARN); | ||
225 | if (!table->icm[i]) | ||
226 | goto err; | ||
227 | if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE, | ||
228 | &status) || status) { | ||
229 | mthca_free_icm(dev, table->icm[i]); | ||
230 | table->icm[i] = NULL; | ||
231 | goto err; | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * Add a reference to this ICM chunk so that it never | ||
236 | * gets freed (since it contains reserved firmware objects). | ||
237 | */ | ||
238 | ++table->icm[i]->refcount; | ||
239 | } | ||
240 | |||
241 | return table; | ||
242 | |||
243 | err: | ||
244 | for (i = 0; i < num_icm; ++i) | ||
245 | if (table->icm[i]) { | ||
246 | mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE, | ||
247 | MTHCA_TABLE_CHUNK_SIZE >> 12, &status); | ||
248 | mthca_free_icm(dev, table->icm[i]); | ||
249 | } | ||
250 | |||
251 | kfree(table); | ||
252 | |||
253 | return NULL; | ||
254 | } | ||
255 | |||
256 | void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table) | ||
257 | { | ||
258 | int i; | ||
259 | u8 status; | ||
260 | |||
261 | for (i = 0; i < table->num_icm; ++i) | ||
262 | if (table->icm[i]) { | ||
263 | mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, | ||
264 | MTHCA_TABLE_CHUNK_SIZE >> 12, &status); | ||
265 | mthca_free_icm(dev, table->icm[i]); | ||
266 | } | ||
267 | |||
268 | kfree(table); | ||
269 | } | ||
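
/*
 * Minimal sketch of table setup/teardown. Every number below is made
 * up purely for illustration: a table of 64-byte objects, 1 << 16 of
 * them (sixteen 256 KB chunks), the first 16 objects reserved for
 * firmware, placed at ICM virtual address "virt" and kept in lowmem.
 */
#if 0
static int example_table_lifetime(struct mthca_dev *dev, u64 virt)
{
        struct mthca_icm_table *table;

        table = mthca_alloc_icm_table(dev, virt, 64, 1 << 16, 16, 1);
        if (!table)
                return -ENOMEM;

        /* ... mthca_table_get()/mthca_table_put() on individual objects ... */

        mthca_free_icm_table(dev, table);
        return 0;
}
#endif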
270 | |||
271 | static u64 mthca_uarc_virt(struct mthca_dev *dev, int page) | ||
272 | { | ||
273 | return dev->uar_table.uarc_base + | ||
274 | dev->driver_uar.index * dev->uar_table.uarc_size + | ||
275 | page * 4096; | ||
276 | } | ||
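
/*
 * Worked example of the calculation above (values invented for
 * illustration only): with uarc_base = 0x1000000, uarc_size = 0x2000
 * and driver_uar.index = 3, doorbell page 2 maps to ICM virtual
 * address 0x1000000 + 3 * 0x2000 + 2 * 4096 = 0x1008000.
 */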
277 | |||
278 | int mthca_alloc_db(struct mthca_dev *dev, int type, u32 qn, u32 **db) | ||
279 | { | ||
280 | int group; | ||
281 | int start, end, dir; | ||
282 | int i, j; | ||
283 | struct mthca_db_page *page; | ||
284 | int ret = 0; | ||
285 | u8 status; | ||
286 | |||
287 | down(&dev->db_tab->mutex); | ||
288 | |||
289 | switch (type) { | ||
290 | case MTHCA_DB_TYPE_CQ_ARM: | ||
291 | case MTHCA_DB_TYPE_SQ: | ||
292 | group = 0; | ||
293 | start = 0; | ||
294 | end = dev->db_tab->max_group1; | ||
295 | dir = 1; | ||
296 | break; | ||
297 | |||
298 | case MTHCA_DB_TYPE_CQ_SET_CI: | ||
299 | case MTHCA_DB_TYPE_RQ: | ||
300 | case MTHCA_DB_TYPE_SRQ: | ||
301 | group = 1; | ||
302 | start = dev->db_tab->npages - 1; | ||
303 | end = dev->db_tab->min_group2; | ||
304 | dir = -1; | ||
305 | break; | ||
306 | |||
307 | default: | ||
308 | return -1; | ||
309 | } | ||
310 | |||
311 | for (i = start; i != end; i += dir) | ||
312 | if (dev->db_tab->page[i].db_rec && | ||
313 | !bitmap_full(dev->db_tab->page[i].used, | ||
314 | MTHCA_DB_REC_PER_PAGE)) { | ||
315 | page = dev->db_tab->page + i; | ||
316 | goto found; | ||
317 | } | ||
318 | |||
319 | if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) { | ||
320 | ret = -ENOMEM; | ||
321 | goto out; | ||
322 | } | ||
323 | |||
324 | page = dev->db_tab->page + end; | ||
325 | page->db_rec = dma_alloc_coherent(&dev->pdev->dev, 4096, | ||
326 | &page->mapping, GFP_KERNEL); | ||
327 | if (!page->db_rec) { | ||
328 | ret = -ENOMEM; | ||
329 | goto out; | ||
330 | } | ||
331 | memset(page->db_rec, 0, 4096); | ||
332 | |||
333 | ret = mthca_MAP_ICM_page(dev, page->mapping, mthca_uarc_virt(dev, i), &status); | ||
334 | if (!ret && status) | ||
335 | ret = -EINVAL; | ||
336 | if (ret) { | ||
337 | dma_free_coherent(&dev->pdev->dev, 4096, | ||
338 | page->db_rec, page->mapping); | ||
339 | goto out; | ||
340 | } | ||
341 | |||
342 | bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE); | ||
343 | if (group == 0) | ||
344 | ++dev->db_tab->max_group1; | ||
345 | else | ||
346 | --dev->db_tab->min_group2; | ||
347 | |||
348 | found: | ||
349 | j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE); | ||
350 | set_bit(j, page->used); | ||
351 | |||
352 | if (group == 1) | ||
353 | j = MTHCA_DB_REC_PER_PAGE - 1 - j; | ||
354 | |||
355 | ret = i * MTHCA_DB_REC_PER_PAGE + j; | ||
356 | |||
357 | page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5)); | ||
358 | |||
359 | *db = (u32 *) &page->db_rec[j]; | ||
360 | |||
361 | out: | ||
362 | up(&dev->db_tab->mutex); | ||
363 | |||
364 | return ret; | ||
365 | } | ||
366 | |||
367 | void mthca_free_db(struct mthca_dev *dev, int type, int db_index) | ||
368 | { | ||
369 | int i, j; | ||
370 | struct mthca_db_page *page; | ||
371 | u8 status; | ||
372 | |||
373 | i = db_index / MTHCA_DB_REC_PER_PAGE; | ||
374 | j = db_index % MTHCA_DB_REC_PER_PAGE; | ||
375 | |||
376 | page = dev->db_tab->page + i; | ||
377 | |||
378 | down(&dev->db_tab->mutex); | ||
379 | |||
380 | page->db_rec[j] = 0; | ||
381 | if (i >= dev->db_tab->min_group2) | ||
382 | j = MTHCA_DB_REC_PER_PAGE - 1 - j; | ||
383 | clear_bit(j, page->used); | ||
384 | |||
385 | if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) && | ||
386 | i >= dev->db_tab->max_group1 - 1) { | ||
387 | mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, i), 1, &status); | ||
388 | |||
389 | dma_free_coherent(&dev->pdev->dev, 4096, | ||
390 | page->db_rec, page->mapping); | ||
391 | page->db_rec = NULL; | ||
392 | |||
393 | if (i == dev->db_tab->max_group1) { | ||
394 | --dev->db_tab->max_group1; | ||
395 | /* XXX may be able to unmap more pages now */ | ||
396 | } | ||
397 | if (i == dev->db_tab->min_group2) | ||
398 | ++dev->db_tab->min_group2; | ||
399 | } | ||
400 | |||
401 | up(&dev->db_tab->mutex); | ||
402 | } | ||
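
/*
 * Minimal sketch of the doorbell record life cycle (hypothetical
 * caller; "cqn" is just an example CQ number): allocate a CQ arm
 * doorbell, keep the returned index, and hand it back when the CQ is
 * destroyed.
 */
#if 0
static int example_db_lifetime(struct mthca_dev *dev, u32 cqn)
{
        u32 *db;
        int db_index;

        db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM, cqn, &db);
        if (db_index < 0)
                return db_index;

        /* ... store db_index in the CQ context and write doorbells via *db ... */

        mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, db_index);
        return 0;
}
#endif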
403 | |||
404 | int mthca_init_db_tab(struct mthca_dev *dev) | ||
405 | { | ||
406 | int i; | ||
407 | |||
408 | if (dev->hca_type != ARBEL_NATIVE) | ||
409 | return 0; | ||
410 | |||
411 | dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL); | ||
412 | if (!dev->db_tab) | ||
413 | return -ENOMEM; | ||
414 | |||
415 | init_MUTEX(&dev->db_tab->mutex); | ||
416 | |||
417 | dev->db_tab->npages = dev->uar_table.uarc_size / PAGE_SIZE; | ||
418 | dev->db_tab->max_group1 = 0; | ||
419 | dev->db_tab->min_group2 = dev->db_tab->npages - 1; | ||
420 | |||
421 | dev->db_tab->page = kmalloc(dev->db_tab->npages * | ||
422 | sizeof *dev->db_tab->page, | ||
423 | GFP_KERNEL); | ||
424 | if (!dev->db_tab->page) { | ||
425 | kfree(dev->db_tab); | ||
426 | return -ENOMEM; | ||
427 | } | ||
428 | |||
429 | for (i = 0; i < dev->db_tab->npages; ++i) | ||
430 | dev->db_tab->page[i].db_rec = NULL; | ||
431 | |||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | void mthca_cleanup_db_tab(struct mthca_dev *dev) | ||
436 | { | ||
437 | int i; | ||
438 | u8 status; | ||
439 | |||
440 | if (dev->hca_type != ARBEL_NATIVE) | ||
441 | return; | ||
442 | |||
443 | /* | ||
444 | * Because we don't always free our UARC pages when they | ||
445 | * become empty to make mthca_free_db() simpler we need to | ||
446 | * make a sweep through the doorbell pages and free any | ||
447 | * leftover pages now. | ||
448 | */ | ||
449 | for (i = 0; i < dev->db_tab->npages; ++i) { | ||
450 | if (!dev->db_tab->page[i].db_rec) | ||
451 | continue; | ||
452 | |||
453 | if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE)) | ||
454 | mthca_warn(dev, "Kernel UARC page %d not empty\n", i); | ||
455 | |||
456 | mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, i), 1, &status); | ||
457 | |||
458 | dma_free_coherent(&dev->pdev->dev, 4096, | ||
459 | dev->db_tab->page[i].db_rec, | ||
460 | dev->db_tab->page[i].mapping); | ||
461 | } | ||
462 | |||
463 | kfree(dev->db_tab->page); | ||
464 | kfree(dev->db_tab); | ||
465 | } | ||
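
/*
 * Minimal sketch of how the doorbell table brackets the lifetime of a
 * mem-free device (the call site below is hypothetical): set the table
 * up once at initialization and tear it down on removal.
 */
#if 0
static int example_db_tab_lifetime(struct mthca_dev *dev)
{
        int err;

        err = mthca_init_db_tab(dev);
        if (err)
                return err;

        /* ... mthca_alloc_db()/mthca_free_db() while the device is live ... */

        mthca_cleanup_db_tab(dev);
        return 0;
}
#endif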