Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_page_alloc_dma.c')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 1134
1 file changed, 1134 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
new file mode 100644
index 000000000000..7a4779304877
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -0,0 +1,1134 @@
1 | /* | ||
2 | * Copyright 2011 (c) Oracle Corp. | ||
3 | |||
4 | * Permission is hereby granted, free of charge, to any person obtaining a | ||
5 | * copy of this software and associated documentation files (the "Software"), | ||
6 | * to deal in the Software without restriction, including without limitation | ||
7 | * the rights to use, copy, modify, merge, publish, distribute, sub license, | ||
8 | * and/or sell copies of the Software, and to permit persons to whom the | ||
9 | * Software is furnished to do so, subject to the following conditions: | ||
10 | * | ||
11 | * The above copyright notice and this permission notice (including the | ||
12 | * next paragraph) shall be included in all copies or substantial portions | ||
13 | * of the Software. | ||
14 | * | ||
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | ||
21 | * DEALINGS IN THE SOFTWARE. | ||
22 | * | ||
23 | * Author: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com> | ||
24 | */ | ||
25 | |||
26 | /* | ||
27 | * A simple DMA pool loosely based on dmapool.c. It has certain advantages | ||
28 | * over the DMA pools: | ||
29 | * - Pool collects recently freed pages for reuse (and hooks up to | ||
30 | * the shrinker). | ||
31 | * - Tracks currently in use pages | ||
32 | * - Tracks whether the page is UC, WB or cached (and reverts to WB | ||
33 | * when freed). | ||
34 | */ | ||
35 | |||
36 | #include <linux/dma-mapping.h> | ||
37 | #include <linux/list.h> | ||
38 | #include <linux/seq_file.h> /* for seq_printf */ | ||
39 | #include <linux/slab.h> | ||
40 | #include <linux/spinlock.h> | ||
41 | #include <linux/highmem.h> | ||
42 | #include <linux/mm_types.h> | ||
43 | #include <linux/module.h> | ||
44 | #include <linux/mm.h> | ||
45 | #include <linux/atomic.h> | ||
46 | #include <linux/device.h> | ||
47 | #include <linux/kthread.h> | ||
48 | #include "ttm/ttm_bo_driver.h" | ||
49 | #include "ttm/ttm_page_alloc.h" | ||
50 | #ifdef TTM_HAS_AGP | ||
51 | #include <asm/agp.h> | ||
52 | #endif | ||
53 | |||
54 | #define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *)) | ||
55 | #define SMALL_ALLOCATION 4 | ||
56 | #define FREE_ALL_PAGES (~0U) | ||
57 | /* Flags describing the pool's caching type and DMA32 restriction. */ | ||
58 | #define IS_UNDEFINED (0) | ||
59 | #define IS_WC (1<<1) | ||
60 | #define IS_UC (1<<2) | ||
61 | #define IS_CACHED (1<<3) | ||
62 | #define IS_DMA32 (1<<4) | ||
63 | |||
64 | enum pool_type { | ||
65 | POOL_IS_UNDEFINED, | ||
66 | POOL_IS_WC = IS_WC, | ||
67 | POOL_IS_UC = IS_UC, | ||
68 | POOL_IS_CACHED = IS_CACHED, | ||
69 | POOL_IS_WC_DMA32 = IS_WC | IS_DMA32, | ||
70 | POOL_IS_UC_DMA32 = IS_UC | IS_DMA32, | ||
71 | POOL_IS_CACHED_DMA32 = IS_CACHED | IS_DMA32, | ||
72 | }; | ||
73 | /* | ||
74 | * The pool structure. There are usually six pools: | ||
75 | * - generic (not restricted to DMA32): | ||
76 | * - write combined, uncached, cached. | ||
77 | * - dma32 (up to 2^32 - so up to 4GB): | ||
78 | * - write combined, uncached, cached. | ||
79 | * for each 'struct device'. The 'cached' pool is for pages that are actively used. | ||
80 | * The other ones can be shrunk by the shrinker API if necessary. | ||
81 | * @pools: The 'struct device->dma_pools' link. | ||
82 | * @type: Type of the pool. | ||
83 | * @lock: Protects the inuse_list and free_list from concurrent access. Must be | ||
84 | * used with the irqsave/irqrestore variants because the pool allocator may be | ||
85 | * called from delayed work. | ||
86 | * @inuse_list: Pool of pages that are in use. The order is important: it matches | ||
87 | * the order in which the TTM pages are put back. | ||
88 | * @free_list: Pool of pages that are free to be used. No order requirements. | ||
89 | * @dev: The device that is associated with these pools. | ||
90 | * @size: Size used during DMA allocation. | ||
91 | * @npages_free: Count of available pages for re-use. | ||
92 | * @npages_in_use: Count of pages that are in use. | ||
93 | * @nfrees: Stats when pool is shrinking. | ||
94 | * @nrefills: Stats when the pool is grown. | ||
95 | * @gfp_flags: Flags to pass to the DMA allocation. | ||
96 | * @name: Name of the pool. | ||
97 | * @dev_name: Name derived from dev - similar to how dev_info works. | ||
98 | * Used during shutdown as the dev_info during release is unavailable. | ||
99 | */ | ||
100 | struct dma_pool { | ||
101 | struct list_head pools; /* The 'struct device->dma_pools link */ | ||
102 | enum pool_type type; | ||
103 | spinlock_t lock; | ||
104 | struct list_head inuse_list; | ||
105 | struct list_head free_list; | ||
106 | struct device *dev; | ||
107 | unsigned size; | ||
108 | unsigned npages_free; | ||
109 | unsigned npages_in_use; | ||
110 | unsigned long nfrees; /* Stats when shrunk. */ | ||
111 | unsigned long nrefills; /* Stats when grown. */ | ||
112 | gfp_t gfp_flags; | ||
113 | char name[13]; /* "cached dma32" */ | ||
114 | char dev_name[64]; /* Constructed from dev */ | ||
115 | }; | ||
116 | |||
117 | /* | ||
118 | * The accounting structure keeping track of the allocated page along with | ||
119 | * its DMA address. | ||
120 | * @page_list: The link to the 'page_list' in 'struct dma_pool'. | ||
121 | * @vaddr: The virtual address of the page | ||
122 | * @dma: The bus address of the page. If the page is not allocated | ||
123 | * via the DMA API, it will be -1. | ||
124 | */ | ||
125 | struct dma_page { | ||
126 | struct list_head page_list; | ||
127 | void *vaddr; | ||
128 | struct page *p; | ||
129 | dma_addr_t dma; | ||
130 | }; | ||
131 | |||
132 | /* | ||
133 | * Limits for the pool. They are handled without locks because the only place | ||
134 | * where they may change is the sysfs store. They won't have an immediate effect | ||
135 | * anyway, so forcing serialization to access them is pointless. | ||
136 | */ | ||
137 | |||
138 | struct ttm_pool_opts { | ||
139 | unsigned alloc_size; | ||
140 | unsigned max_size; | ||
141 | unsigned small; | ||
142 | }; | ||
143 | |||
144 | /* | ||
145 | * Contains the list of all of the 'struct device' and their corresponding | ||
146 | * DMA pools. Guarded by _manager->lock. | ||
147 | * @pools: The link to 'struct ttm_pool_manager->pools' | ||
148 | * @dev: The 'struct device' associated with the 'pool' | ||
149 | * @pool: The 'struct dma_pool' associated with the 'dev' | ||
150 | */ | ||
151 | struct device_pools { | ||
152 | struct list_head pools; | ||
153 | struct device *dev; | ||
154 | struct dma_pool *pool; | ||
155 | }; | ||
156 | |||
157 | /* | ||
158 | * struct ttm_pool_manager - Holds memory pools for fast allocation | ||
159 | * | ||
160 | * @lock: Lock used when adding/removing from pools | ||
161 | * @pools: List of 'struct device' and 'struct dma_pool' tuples. | ||
162 | * @options: Limits for the pool. | ||
163 | * @npools: Total number of pools in existence. | ||
164 | * @mm_shrink: The structure used by [un]register_shrinker. | ||
165 | */ | ||
166 | struct ttm_pool_manager { | ||
167 | struct mutex lock; | ||
168 | struct list_head pools; | ||
169 | struct ttm_pool_opts options; | ||
170 | unsigned npools; | ||
171 | struct shrinker mm_shrink; | ||
172 | struct kobject kobj; | ||
173 | }; | ||
174 | |||
175 | static struct ttm_pool_manager *_manager; | ||
176 | |||
177 | static struct attribute ttm_page_pool_max = { | ||
178 | .name = "pool_max_size", | ||
179 | .mode = S_IRUGO | S_IWUSR | ||
180 | }; | ||
181 | static struct attribute ttm_page_pool_small = { | ||
182 | .name = "pool_small_allocation", | ||
183 | .mode = S_IRUGO | S_IWUSR | ||
184 | }; | ||
185 | static struct attribute ttm_page_pool_alloc_size = { | ||
186 | .name = "pool_allocation_size", | ||
187 | .mode = S_IRUGO | S_IWUSR | ||
188 | }; | ||
189 | |||
190 | static struct attribute *ttm_pool_attrs[] = { | ||
191 | &ttm_page_pool_max, | ||
192 | &ttm_page_pool_small, | ||
193 | &ttm_page_pool_alloc_size, | ||
194 | NULL | ||
195 | }; | ||
196 | |||
197 | static void ttm_pool_kobj_release(struct kobject *kobj) | ||
198 | { | ||
199 | struct ttm_pool_manager *m = | ||
200 | container_of(kobj, struct ttm_pool_manager, kobj); | ||
201 | kfree(m); | ||
202 | } | ||
203 | |||
204 | static ssize_t ttm_pool_store(struct kobject *kobj, struct attribute *attr, | ||
205 | const char *buffer, size_t size) | ||
206 | { | ||
207 | struct ttm_pool_manager *m = | ||
208 | container_of(kobj, struct ttm_pool_manager, kobj); | ||
209 | int chars; | ||
210 | unsigned val; | ||
211 | chars = sscanf(buffer, "%u", &val); | ||
212 | if (chars == 0) | ||
213 | return size; | ||
214 | |||
215 | /* Convert kb to number of pages */ | ||
216 | val = val / (PAGE_SIZE >> 10); | ||
217 | |||
218 | if (attr == &ttm_page_pool_max) | ||
219 | m->options.max_size = val; | ||
220 | else if (attr == &ttm_page_pool_small) | ||
221 | m->options.small = val; | ||
222 | else if (attr == &ttm_page_pool_alloc_size) { | ||
223 | if (val > NUM_PAGES_TO_ALLOC*8) { | ||
224 | printk(KERN_ERR TTM_PFX | ||
225 | "Setting allocation size to %lu " | ||
226 | "is not allowed. Recommended size is " | ||
227 | "%lu\n", | ||
228 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7), | ||
229 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | ||
230 | return size; | ||
231 | } else if (val > NUM_PAGES_TO_ALLOC) { | ||
232 | printk(KERN_WARNING TTM_PFX | ||
233 | "Setting allocation size to " | ||
234 | "larger than %lu is not recommended.\n", | ||
235 | NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10)); | ||
236 | } | ||
237 | m->options.alloc_size = val; | ||
238 | } | ||
239 | |||
240 | return size; | ||
241 | } | ||
242 | |||
243 | static ssize_t ttm_pool_show(struct kobject *kobj, struct attribute *attr, | ||
244 | char *buffer) | ||
245 | { | ||
246 | struct ttm_pool_manager *m = | ||
247 | container_of(kobj, struct ttm_pool_manager, kobj); | ||
248 | unsigned val = 0; | ||
249 | |||
250 | if (attr == &ttm_page_pool_max) | ||
251 | val = m->options.max_size; | ||
252 | else if (attr == &ttm_page_pool_small) | ||
253 | val = m->options.small; | ||
254 | else if (attr == &ttm_page_pool_alloc_size) | ||
255 | val = m->options.alloc_size; | ||
256 | |||
257 | val = val * (PAGE_SIZE >> 10); | ||
258 | |||
259 | return snprintf(buffer, PAGE_SIZE, "%u\n", val); | ||
260 | } | ||
261 | |||
262 | static const struct sysfs_ops ttm_pool_sysfs_ops = { | ||
263 | .show = &ttm_pool_show, | ||
264 | .store = &ttm_pool_store, | ||
265 | }; | ||
266 | |||
267 | static struct kobj_type ttm_pool_kobj_type = { | ||
268 | .release = &ttm_pool_kobj_release, | ||
269 | .sysfs_ops = &ttm_pool_sysfs_ops, | ||
270 | .default_attrs = ttm_pool_attrs, | ||
271 | }; | ||
272 | |||
273 | #ifndef CONFIG_X86 | ||
274 | static int set_pages_array_wb(struct page **pages, int addrinarray) | ||
275 | { | ||
276 | #ifdef TTM_HAS_AGP | ||
277 | int i; | ||
278 | |||
279 | for (i = 0; i < addrinarray; i++) | ||
280 | unmap_page_from_agp(pages[i]); | ||
281 | #endif | ||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static int set_pages_array_wc(struct page **pages, int addrinarray) | ||
286 | { | ||
287 | #ifdef TTM_HAS_AGP | ||
288 | int i; | ||
289 | |||
290 | for (i = 0; i < addrinarray; i++) | ||
291 | map_page_into_agp(pages[i]); | ||
292 | #endif | ||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | static int set_pages_array_uc(struct page **pages, int addrinarray) | ||
297 | { | ||
298 | #ifdef TTM_HAS_AGP | ||
299 | int i; | ||
300 | |||
301 | for (i = 0; i < addrinarray; i++) | ||
302 | map_page_into_agp(pages[i]); | ||
303 | #endif | ||
304 | return 0; | ||
305 | } | ||
306 | #endif /* for !CONFIG_X86 */ | ||
307 | |||
308 | static int ttm_set_pages_caching(struct dma_pool *pool, | ||
309 | struct page **pages, unsigned cpages) | ||
310 | { | ||
311 | int r = 0; | ||
312 | /* Set page caching */ | ||
313 | if (pool->type & IS_UC) { | ||
314 | r = set_pages_array_uc(pages, cpages); | ||
315 | if (r) | ||
316 | pr_err(TTM_PFX | ||
317 | "%s: Failed to set %d pages to uc!\n", | ||
318 | pool->dev_name, cpages); | ||
319 | } | ||
320 | if (pool->type & IS_WC) { | ||
321 | r = set_pages_array_wc(pages, cpages); | ||
322 | if (r) | ||
323 | pr_err(TTM_PFX | ||
324 | "%s: Failed to set %d pages to wc!\n", | ||
325 | pool->dev_name, cpages); | ||
326 | } | ||
327 | return r; | ||
328 | } | ||
329 | |||
330 | static void __ttm_dma_free_page(struct dma_pool *pool, struct dma_page *d_page) | ||
331 | { | ||
332 | dma_addr_t dma = d_page->dma; | ||
333 | dma_free_coherent(pool->dev, pool->size, d_page->vaddr, dma); | ||
334 | |||
335 | kfree(d_page); | ||
336 | d_page = NULL; | ||
337 | } | ||
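| /* Allocate a single page via dma_alloc_coherent() and wrap it in a | ||
| * struct dma_page; returns NULL on failure. */ | ||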
338 | static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool) | ||
339 | { | ||
340 | struct dma_page *d_page; | ||
341 | |||
342 | d_page = kmalloc(sizeof(struct dma_page), GFP_KERNEL); | ||
343 | if (!d_page) | ||
344 | return NULL; | ||
345 | |||
346 | d_page->vaddr = dma_alloc_coherent(pool->dev, pool->size, | ||
347 | &d_page->dma, | ||
348 | pool->gfp_flags); | ||
349 | if (d_page->vaddr) | ||
350 | d_page->p = virt_to_page(d_page->vaddr); | ||
351 | else { | ||
352 | kfree(d_page); | ||
353 | d_page = NULL; | ||
354 | } | ||
355 | return d_page; | ||
356 | } | ||
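| /* Translate TTM page flags and caching state into the matching pool type. */ | ||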
357 | static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate) | ||
358 | { | ||
359 | enum pool_type type = IS_UNDEFINED; | ||
360 | |||
361 | if (flags & TTM_PAGE_FLAG_DMA32) | ||
362 | type |= IS_DMA32; | ||
363 | if (cstate == tt_cached) | ||
364 | type |= IS_CACHED; | ||
365 | else if (cstate == tt_uncached) | ||
366 | type |= IS_UC; | ||
367 | else | ||
368 | type |= IS_WC; | ||
369 | |||
370 | return type; | ||
371 | } | ||
372 | |||
373 | static void ttm_pool_update_free_locked(struct dma_pool *pool, | ||
374 | unsigned freed_pages) | ||
375 | { | ||
376 | pool->npages_free -= freed_pages; | ||
377 | pool->nfrees += freed_pages; | ||
378 | |||
379 | } | ||
380 | |||
381 | /* set memory back to wb and free the pages. */ | ||
382 | static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages, | ||
383 | struct page *pages[], unsigned npages) | ||
384 | { | ||
385 | struct dma_page *d_page, *tmp; | ||
386 | |||
387 | if (npages && set_pages_array_wb(pages, npages)) | ||
388 | pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n", | ||
389 | pool->dev_name, npages); | ||
390 | |||
391 | list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { | ||
392 | list_del(&d_page->page_list); | ||
393 | __ttm_dma_free_page(pool, d_page); | ||
394 | } | ||
395 | } | ||
396 | |||
397 | static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page) | ||
398 | { | ||
399 | if (set_pages_array_wb(&d_page->p, 1)) | ||
400 | pr_err(TTM_PFX "%s: Failed to set %d pages to wb!\n", | ||
401 | pool->dev_name, 1); | ||
402 | |||
403 | list_del(&d_page->page_list); | ||
404 | __ttm_dma_free_page(pool, d_page); | ||
405 | } | ||
406 | |||
407 | /* | ||
408 | * Free pages from pool. | ||
409 | * | ||
410 | * To prevent hogging the ttm_swap process we only free up to | ||
411 | * NUM_PAGES_TO_ALLOC pages in one go. | ||
412 | * | ||
413 | * @pool: The pool to free the pages from. | ||
414 | * @nr_free: The number of pages to free, or FREE_ALL_PAGES to empty the pool. | ||
415 | **/ | ||
416 | static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free) | ||
417 | { | ||
418 | unsigned long irq_flags; | ||
419 | struct dma_page *dma_p, *tmp; | ||
420 | struct page **pages_to_free; | ||
421 | struct list_head d_pages; | ||
422 | unsigned freed_pages = 0, | ||
423 | npages_to_free = nr_free; | ||
424 | |||
425 | if (NUM_PAGES_TO_ALLOC < nr_free) | ||
426 | npages_to_free = NUM_PAGES_TO_ALLOC; | ||
427 | #if 0 | ||
428 | if (nr_free > 1) { | ||
429 | pr_debug("%s: (%s:%d) Attempting to free %d (%d) pages\n", | ||
430 | pool->dev_name, pool->name, current->pid, | ||
431 | npages_to_free, nr_free); | ||
432 | } | ||
433 | #endif | ||
434 | pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), | ||
435 | GFP_KERNEL); | ||
436 | |||
437 | if (!pages_to_free) { | ||
438 | pr_err(TTM_PFX | ||
439 | "%s: Failed to allocate memory for pool free operation.\n", | ||
440 | pool->dev_name); | ||
441 | return 0; | ||
442 | } | ||
443 | INIT_LIST_HEAD(&d_pages); | ||
444 | restart: | ||
445 | spin_lock_irqsave(&pool->lock, irq_flags); | ||
446 | |||
447 | /* We are picking the oldest ones off the list */ | ||
448 | list_for_each_entry_safe_reverse(dma_p, tmp, &pool->free_list, | ||
449 | page_list) { | ||
450 | if (freed_pages >= npages_to_free) | ||
451 | break; | ||
452 | |||
453 | /* Move the dma_page from one list to another. */ | ||
454 | list_move(&dma_p->page_list, &d_pages); | ||
455 | |||
456 | pages_to_free[freed_pages++] = dma_p->p; | ||
457 | /* We can only remove NUM_PAGES_TO_ALLOC at a time. */ | ||
458 | if (freed_pages >= NUM_PAGES_TO_ALLOC) { | ||
459 | |||
460 | ttm_pool_update_free_locked(pool, freed_pages); | ||
461 | /** | ||
462 | * Because changing page caching is costly | ||
463 | * we unlock the pool to prevent stalling. | ||
464 | */ | ||
465 | spin_unlock_irqrestore(&pool->lock, irq_flags); | ||
466 | |||
467 | ttm_dma_pages_put(pool, &d_pages, pages_to_free, | ||
468 | freed_pages); | ||
469 | |||
470 | INIT_LIST_HEAD(&d_pages); | ||
471 | |||
472 | if (likely(nr_free != FREE_ALL_PAGES)) | ||
473 | nr_free -= freed_pages; | ||
474 | |||
475 | if (NUM_PAGES_TO_ALLOC >= nr_free) | ||
476 | npages_to_free = nr_free; | ||
477 | else | ||
478 | npages_to_free = NUM_PAGES_TO_ALLOC; | ||
479 | |||
480 | freed_pages = 0; | ||
481 | |||
482 | /* free all so restart the processing */ | ||
483 | if (nr_free) | ||
484 | goto restart; | ||
485 | |||
486 | /* Not allowed to fall through or break, because the | ||
487 | * code following the loop runs with the spinlock held, | ||
488 | * while here we have already dropped it. | ||
489 | */ | ||
490 | goto out; | ||
491 | |||
492 | } | ||
493 | } | ||
494 | |||
495 | /* remove range of pages from the pool */ | ||
496 | if (freed_pages) { | ||
497 | ttm_pool_update_free_locked(pool, freed_pages); | ||
498 | nr_free -= freed_pages; | ||
499 | } | ||
500 | |||
501 | spin_unlock_irqrestore(&pool->lock, irq_flags); | ||
502 | |||
503 | if (freed_pages) | ||
504 | ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages); | ||
505 | out: | ||
506 | kfree(pages_to_free); | ||
507 | return nr_free; | ||
508 | } | ||
509 | |||
510 | static void ttm_dma_free_pool(struct device *dev, enum pool_type type) | ||
511 | { | ||
512 | struct device_pools *p; | ||
513 | struct dma_pool *pool; | ||
514 | |||
515 | if (!dev) | ||
516 | return; | ||
517 | |||
518 | mutex_lock(&_manager->lock); | ||
519 | list_for_each_entry_reverse(p, &_manager->pools, pools) { | ||
520 | if (p->dev != dev) | ||
521 | continue; | ||
522 | pool = p->pool; | ||
523 | if (pool->type != type) | ||
524 | continue; | ||
525 | |||
526 | list_del(&p->pools); | ||
527 | kfree(p); | ||
528 | _manager->npools--; | ||
529 | break; | ||
530 | } | ||
531 | list_for_each_entry_reverse(pool, &dev->dma_pools, pools) { | ||
532 | if (pool->type != type) | ||
533 | continue; | ||
534 | /* Takes a spinlock.. */ | ||
535 | ttm_dma_page_pool_free(pool, FREE_ALL_PAGES); | ||
536 | WARN_ON(((pool->npages_in_use + pool->npages_free) != 0)); | ||
537 | /* This code path is called after _all_ references to the | ||
538 | * struct device have been dropped - so nobody should be | ||
539 | * touching it. In case somebody is trying to _add_ we are | ||
540 | * guarded by the mutex. */ | ||
541 | list_del(&pool->pools); | ||
542 | kfree(pool); | ||
543 | break; | ||
544 | } | ||
545 | mutex_unlock(&_manager->lock); | ||
546 | } | ||
547 | |||
548 | /* | ||
549 | * When the 'struct device' is freed this destructor is run, | ||
550 | * although the pool might have already been freed earlier. | ||
551 | */ | ||
552 | static void ttm_dma_pool_release(struct device *dev, void *res) | ||
553 | { | ||
554 | struct dma_pool *pool = *(struct dma_pool **)res; | ||
555 | |||
556 | if (pool) | ||
557 | ttm_dma_free_pool(dev, pool->type); | ||
558 | } | ||
559 | |||
560 | static int ttm_dma_pool_match(struct device *dev, void *res, void *match_data) | ||
561 | { | ||
562 | return *(struct dma_pool **)res == match_data; | ||
563 | } | ||
564 | |||
565 | static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags, | ||
566 | enum pool_type type) | ||
567 | { | ||
568 | char *n[] = {"wc", "uc", "cached", " dma32", "unknown",}; | ||
569 | enum pool_type t[] = {IS_WC, IS_UC, IS_CACHED, IS_DMA32, IS_UNDEFINED}; | ||
570 | struct device_pools *sec_pool = NULL; | ||
571 | struct dma_pool *pool = NULL, **ptr; | ||
572 | unsigned i; | ||
573 | int ret = -ENODEV; | ||
574 | char *p; | ||
575 | |||
576 | if (!dev) | ||
577 | return NULL; | ||
578 | |||
579 | ptr = devres_alloc(ttm_dma_pool_release, sizeof(*ptr), GFP_KERNEL); | ||
580 | if (!ptr) | ||
581 | return NULL; | ||
582 | |||
583 | ret = -ENOMEM; | ||
584 | |||
585 | pool = kmalloc_node(sizeof(struct dma_pool), GFP_KERNEL, | ||
586 | dev_to_node(dev)); | ||
587 | if (!pool) | ||
588 | goto err_mem; | ||
589 | |||
590 | sec_pool = kmalloc_node(sizeof(struct device_pools), GFP_KERNEL, | ||
591 | dev_to_node(dev)); | ||
592 | if (!sec_pool) | ||
593 | goto err_mem; | ||
594 | |||
595 | INIT_LIST_HEAD(&sec_pool->pools); | ||
596 | sec_pool->dev = dev; | ||
597 | sec_pool->pool = pool; | ||
598 | |||
599 | INIT_LIST_HEAD(&pool->free_list); | ||
600 | INIT_LIST_HEAD(&pool->inuse_list); | ||
601 | INIT_LIST_HEAD(&pool->pools); | ||
602 | spin_lock_init(&pool->lock); | ||
603 | pool->dev = dev; | ||
604 | pool->npages_free = pool->npages_in_use = 0; | ||
605 | pool->nfrees = 0; | ||
606 | pool->gfp_flags = flags; | ||
607 | pool->size = PAGE_SIZE; | ||
608 | pool->type = type; | ||
609 | pool->nrefills = 0; | ||
610 | p = pool->name; | ||
611 | for (i = 0; i < 5; i++) { | ||
612 | if (type & t[i]) { | ||
613 | p += snprintf(p, sizeof(pool->name) - (p - pool->name), | ||
614 | "%s", n[i]); | ||
615 | } | ||
616 | } | ||
617 | *p = 0; | ||
618 | /* We copy the name for pr_* calls because by the time the pool is | ||
619 | * destroyed the kobj->name has already been deallocated. */ | ||
620 | snprintf(pool->dev_name, sizeof(pool->dev_name), "%s %s", | ||
621 | dev_driver_string(dev), dev_name(dev)); | ||
622 | mutex_lock(&_manager->lock); | ||
623 | /* You can get the dma_pool from either the global: */ | ||
624 | list_add(&sec_pool->pools, &_manager->pools); | ||
625 | _manager->npools++; | ||
626 | /* or from 'struct device': */ | ||
627 | list_add(&pool->pools, &dev->dma_pools); | ||
628 | mutex_unlock(&_manager->lock); | ||
629 | |||
630 | *ptr = pool; | ||
631 | devres_add(dev, ptr); | ||
632 | |||
633 | return pool; | ||
634 | err_mem: | ||
635 | devres_free(ptr); | ||
636 | kfree(sec_pool); | ||
637 | kfree(pool); | ||
638 | return ERR_PTR(ret); | ||
639 | } | ||
640 | |||
641 | static struct dma_pool *ttm_dma_find_pool(struct device *dev, | ||
642 | enum pool_type type) | ||
643 | { | ||
644 | struct dma_pool *pool, *tmp, *found = NULL; | ||
645 | |||
646 | if (type == IS_UNDEFINED) | ||
647 | return found; | ||
648 | |||
649 | /* NB: We iterate on the 'struct dev' which has no spinlock, but | ||
650 | * it does have a kref which we have taken. The kref is taken during | ||
651 | * graphic driver loading - in the drm_pci_init it calls either | ||
652 | * pci_dev_get or pci_register_driver which both end up taking a kref | ||
653 | * on 'struct device'. | ||
654 | * | ||
655 | * On teardown, the graphics drivers end up quiescing the TTM (put_pages) | ||
656 | * and calling the devres destructor: ttm_dma_pool_release. The nice | ||
657 | * thing is that at that point in time there are no pages associated with the | ||
658 | * driver so this function will not be called. | ||
659 | */ | ||
660 | list_for_each_entry_safe(pool, tmp, &dev->dma_pools, pools) { | ||
661 | if (pool->type != type) | ||
662 | continue; | ||
663 | found = pool; | ||
664 | break; | ||
665 | } | ||
666 | return found; | ||
667 | } | ||
668 | |||
669 | /* | ||
670 | * Free the pages that failed to change their caching state. If there | ||
671 | * are pages that have already changed their caching state, put them back | ||
672 | * in the pool. | ||
673 | */ | ||
674 | static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool, | ||
675 | struct list_head *d_pages, | ||
676 | struct page **failed_pages, | ||
677 | unsigned cpages) | ||
678 | { | ||
679 | struct dma_page *d_page, *tmp; | ||
680 | struct page *p; | ||
681 | unsigned i = 0; | ||
682 | |||
683 | p = failed_pages[0]; | ||
684 | if (!p) | ||
685 | return; | ||
686 | /* Find the failed page. */ | ||
687 | list_for_each_entry_safe(d_page, tmp, d_pages, page_list) { | ||
688 | if (d_page->p != p) | ||
689 | continue; | ||
690 | /* .. and then progress over the full list. */ | ||
691 | list_del(&d_page->page_list); | ||
692 | __ttm_dma_free_page(pool, d_page); | ||
693 | if (++i < cpages) | ||
694 | p = failed_pages[i]; | ||
695 | else | ||
696 | break; | ||
697 | } | ||
698 | |||
699 | } | ||
700 | |||
701 | /* | ||
702 | * Allocate 'count' pages, set their caching state according to the | ||
703 | * pool type, and add them to the 'd_pages' list. Each entry tracks | ||
704 | * both the page and its DMA address. | ||
705 | * We return zero for success, and negative numbers as errors. | ||
706 | */ | ||
707 | static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, | ||
708 | struct list_head *d_pages, | ||
709 | unsigned count) | ||
710 | { | ||
711 | struct page **caching_array; | ||
712 | struct dma_page *dma_p; | ||
713 | struct page *p; | ||
714 | int r = 0; | ||
715 | unsigned i, cpages; | ||
716 | unsigned max_cpages = min(count, | ||
717 | (unsigned)(PAGE_SIZE/sizeof(struct page *))); | ||
718 | |||
719 | /* allocate array for page caching change */ | ||
720 | caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL); | ||
721 | |||
722 | if (!caching_array) { | ||
723 | pr_err(TTM_PFX | ||
724 | "%s: Unable to allocate table for new pages.", | ||
725 | pool->dev_name); | ||
726 | return -ENOMEM; | ||
727 | } | ||
728 | |||
729 | if (count > 1) { | ||
730 | pr_debug("%s: (%s:%d) Getting %d pages\n", | ||
731 | pool->dev_name, pool->name, current->pid, | ||
732 | count); | ||
733 | } | ||
734 | |||
735 | for (i = 0, cpages = 0; i < count; ++i) { | ||
736 | dma_p = __ttm_dma_alloc_page(pool); | ||
737 | if (!dma_p) { | ||
738 | pr_err(TTM_PFX "%s: Unable to get page %u.\n", | ||
739 | pool->dev_name, i); | ||
740 | |||
741 | /* store already allocated pages in the pool after | ||
742 | * setting the caching state */ | ||
743 | if (cpages) { | ||
744 | r = ttm_set_pages_caching(pool, caching_array, | ||
745 | cpages); | ||
746 | if (r) | ||
747 | ttm_dma_handle_caching_state_failure( | ||
748 | pool, d_pages, caching_array, | ||
749 | cpages); | ||
750 | } | ||
751 | r = -ENOMEM; | ||
752 | goto out; | ||
753 | } | ||
754 | p = dma_p->p; | ||
755 | #ifdef CONFIG_HIGHMEM | ||
756 | /* gfp flags of a highmem page should never be dma32, so | ||
757 | * we should be fine in such a case. | ||
758 | */ | ||
759 | if (!PageHighMem(p)) | ||
760 | #endif | ||
761 | { | ||
762 | caching_array[cpages++] = p; | ||
763 | if (cpages == max_cpages) { | ||
764 | /* Note: Cannot hold the spinlock */ | ||
765 | r = ttm_set_pages_caching(pool, caching_array, | ||
766 | cpages); | ||
767 | if (r) { | ||
768 | ttm_dma_handle_caching_state_failure( | ||
769 | pool, d_pages, caching_array, | ||
770 | cpages); | ||
771 | goto out; | ||
772 | } | ||
773 | cpages = 0; | ||
774 | } | ||
775 | } | ||
776 | list_add(&dma_p->page_list, d_pages); | ||
777 | } | ||
778 | |||
779 | if (cpages) { | ||
780 | r = ttm_set_pages_caching(pool, caching_array, cpages); | ||
781 | if (r) | ||
782 | ttm_dma_handle_caching_state_failure(pool, d_pages, | ||
783 | caching_array, cpages); | ||
784 | } | ||
785 | out: | ||
786 | kfree(caching_array); | ||
787 | return r; | ||
788 | } | ||
789 | |||
790 | /* | ||
791 | * @return the number of pages available in the pool's free list; zero if the pool could not be filled. | ||
792 | */ | ||
793 | static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool, | ||
794 | unsigned long *irq_flags) | ||
795 | { | ||
796 | unsigned count = _manager->options.small; | ||
797 | int r = pool->npages_free; | ||
798 | |||
799 | if (count > pool->npages_free) { | ||
800 | struct list_head d_pages; | ||
801 | |||
802 | INIT_LIST_HEAD(&d_pages); | ||
803 | |||
804 | spin_unlock_irqrestore(&pool->lock, *irq_flags); | ||
805 | |||
806 | /* Returns zero on success, or a negative error code on | ||
807 | * failure. */ | ||
808 | r = ttm_dma_pool_alloc_new_pages(pool, &d_pages, count); | ||
809 | |||
810 | spin_lock_irqsave(&pool->lock, *irq_flags); | ||
811 | if (!r) { | ||
812 | /* Add the fresh to the end.. */ | ||
813 | list_splice(&d_pages, &pool->free_list); | ||
814 | ++pool->nrefills; | ||
815 | pool->npages_free += count; | ||
816 | r = count; | ||
817 | } else { | ||
818 | struct dma_page *d_page; | ||
819 | unsigned cpages = 0; | ||
820 | |||
821 | pr_err(TTM_PFX "%s: Failed to fill %s pool (r:%d)!\n", | ||
822 | pool->dev_name, pool->name, r); | ||
823 | |||
824 | list_for_each_entry(d_page, &d_pages, page_list) { | ||
825 | cpages++; | ||
826 | } | ||
827 | list_splice_tail(&d_pages, &pool->free_list); | ||
828 | pool->npages_free += cpages; | ||
829 | r = cpages; | ||
830 | } | ||
831 | } | ||
832 | return r; | ||
833 | } | ||
834 | |||
835 | /* | ||
836 | * @return zero on success, or -ENOMEM if no page could be obtained. | ||
837 | * The populate list is actually a stack (not that it matters, as TTM | ||
838 | * allocates one page at a time). | ||
839 | */ | ||
840 | static int ttm_dma_pool_get_pages(struct dma_pool *pool, | ||
841 | struct ttm_tt *ttm, | ||
842 | unsigned index) | ||
843 | { | ||
844 | struct dma_page *d_page; | ||
845 | unsigned long irq_flags; | ||
846 | int count, r = -ENOMEM; | ||
847 | |||
848 | spin_lock_irqsave(&pool->lock, irq_flags); | ||
849 | count = ttm_dma_page_pool_fill_locked(pool, &irq_flags); | ||
850 | if (count) { | ||
851 | d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); | ||
852 | ttm->pages[index] = d_page->p; | ||
853 | ttm->dma_address[index] = d_page->dma; | ||
854 | list_move_tail(&d_page->page_list, &ttm->alloc_list); | ||
855 | r = 0; | ||
856 | pool->npages_in_use += 1; | ||
857 | pool->npages_free -= 1; | ||
858 | } | ||
859 | spin_unlock_irqrestore(&pool->lock, irq_flags); | ||
860 | return r; | ||
861 | } | ||
862 | |||
863 | /* | ||
864 | * On success the pages list will hold the requested number of correctly | ||
865 | * cached pages. On failure a negative error value (-ENOMEM, etc.) is returned. | ||
866 | */ | ||
867 | int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev) | ||
868 | { | ||
869 | struct ttm_mem_global *mem_glob = ttm->glob->mem_glob; | ||
870 | struct dma_pool *pool; | ||
871 | enum pool_type type; | ||
872 | unsigned i; | ||
873 | gfp_t gfp_flags; | ||
874 | int ret; | ||
875 | |||
876 | if (ttm->state != tt_unpopulated) | ||
877 | return 0; | ||
878 | |||
879 | type = ttm_to_type(ttm->page_flags, ttm->caching_state); | ||
880 | if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) | ||
881 | gfp_flags = GFP_USER | GFP_DMA32; | ||
882 | else | ||
883 | gfp_flags = GFP_HIGHUSER; | ||
884 | if (ttm->page_flags & TTM_PAGE_FLAG_ZERO_ALLOC) | ||
885 | gfp_flags |= __GFP_ZERO; | ||
886 | |||
887 | pool = ttm_dma_find_pool(dev, type); | ||
888 | if (!pool) { | ||
889 | pool = ttm_dma_pool_init(dev, gfp_flags, type); | ||
890 | if (IS_ERR_OR_NULL(pool)) { | ||
891 | return -ENOMEM; | ||
892 | } | ||
893 | } | ||
894 | |||
895 | INIT_LIST_HEAD(&ttm->alloc_list); | ||
896 | for (i = 0; i < ttm->num_pages; ++i) { | ||
897 | ret = ttm_dma_pool_get_pages(pool, ttm, i); | ||
898 | if (ret != 0) { | ||
899 | ttm_dma_unpopulate(ttm, dev); | ||
900 | return -ENOMEM; | ||
901 | } | ||
902 | |||
903 | ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], | ||
904 | false, false); | ||
905 | if (unlikely(ret != 0)) { | ||
906 | ttm_dma_unpopulate(ttm, dev); | ||
907 | return -ENOMEM; | ||
908 | } | ||
909 | } | ||
910 | |||
911 | if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) { | ||
912 | ret = ttm_tt_swapin(ttm); | ||
913 | if (unlikely(ret != 0)) { | ||
914 | ttm_dma_unpopulate(ttm, dev); | ||
915 | return ret; | ||
916 | } | ||
917 | } | ||
918 | |||
919 | ttm->state = tt_unbound; | ||
920 | return 0; | ||
921 | } | ||
922 | EXPORT_SYMBOL_GPL(ttm_dma_populate); | ||
923 | |||
924 | /* Get a good estimate of how many pages are free in the pools */ | ||
925 | static int ttm_dma_pool_get_num_unused_pages(void) | ||
926 | { | ||
927 | struct device_pools *p; | ||
928 | unsigned total = 0; | ||
929 | |||
930 | mutex_lock(&_manager->lock); | ||
931 | list_for_each_entry(p, &_manager->pools, pools) { | ||
932 | if (p) | ||
933 | total += p->pool->npages_free; | ||
934 | } | ||
935 | mutex_unlock(&_manager->lock); | ||
936 | return total; | ||
937 | } | ||
938 | |||
939 | /* Put all pages in pages list to correct pool to wait for reuse */ | ||
940 | void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev) | ||
941 | { | ||
942 | struct dma_pool *pool; | ||
943 | struct dma_page *d_page, *next; | ||
944 | enum pool_type type; | ||
945 | bool is_cached = false; | ||
946 | unsigned count = 0, i; | ||
947 | unsigned long irq_flags; | ||
948 | |||
949 | type = ttm_to_type(ttm->page_flags, ttm->caching_state); | ||
950 | pool = ttm_dma_find_pool(dev, type); | ||
951 | if (!pool) { | ||
952 | WARN_ON(!pool); | ||
953 | return; | ||
954 | } | ||
955 | is_cached = (ttm_dma_find_pool(pool->dev, | ||
956 | ttm_to_type(ttm->page_flags, tt_cached)) == pool); | ||
957 | |||
958 | /* make sure pages array match list and count number of pages */ | ||
959 | list_for_each_entry(d_page, &ttm->alloc_list, page_list) { | ||
960 | ttm->pages[count] = d_page->p; | ||
961 | count++; | ||
962 | } | ||
963 | |||
964 | spin_lock_irqsave(&pool->lock, irq_flags); | ||
965 | pool->npages_in_use -= count; | ||
966 | if (is_cached) { | ||
967 | pool->nfrees += count; | ||
968 | } else { | ||
969 | pool->npages_free += count; | ||
970 | list_splice(&ttm->alloc_list, &pool->free_list); | ||
971 | if (pool->npages_free > _manager->options.max_size) { | ||
972 | count = pool->npages_free - _manager->options.max_size; | ||
973 | } | ||
974 | } | ||
975 | spin_unlock_irqrestore(&pool->lock, irq_flags); | ||
976 | |||
977 | if (is_cached) { | ||
978 | list_for_each_entry_safe(d_page, next, &ttm->alloc_list, page_list) { | ||
979 | ttm_mem_global_free_page(ttm->glob->mem_glob, | ||
980 | d_page->p); | ||
981 | ttm_dma_page_put(pool, d_page); | ||
982 | } | ||
983 | } else { | ||
984 | for (i = 0; i < count; i++) { | ||
985 | ttm_mem_global_free_page(ttm->glob->mem_glob, | ||
986 | ttm->pages[i]); | ||
987 | } | ||
988 | } | ||
989 | |||
990 | INIT_LIST_HEAD(&ttm->alloc_list); | ||
991 | for (i = 0; i < ttm->num_pages; i++) { | ||
992 | ttm->pages[i] = NULL; | ||
993 | ttm->dma_address[i] = 0; | ||
994 | } | ||
995 | |||
996 | /* shrink pool if necessary */ | ||
997 | if (count) | ||
998 | ttm_dma_page_pool_free(pool, count); | ||
999 | ttm->state = tt_unpopulated; | ||
1000 | } | ||
1001 | EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); | ||
1002 | |||
1003 | /** | ||
1004 | * Callback for mm to request the pools to reduce the number of pages held. | ||
1005 | */ | ||
1006 | static int ttm_dma_pool_mm_shrink(struct shrinker *shrink, | ||
1007 | struct shrink_control *sc) | ||
1008 | { | ||
1009 | static atomic_t start_pool = ATOMIC_INIT(0); | ||
1010 | unsigned idx = 0; | ||
1011 | unsigned pool_offset = atomic_add_return(1, &start_pool); | ||
1012 | unsigned shrink_pages = sc->nr_to_scan; | ||
1013 | struct device_pools *p; | ||
1014 | |||
1015 | if (list_empty(&_manager->pools)) | ||
1016 | return 0; | ||
1017 | |||
1018 | mutex_lock(&_manager->lock); | ||
1019 | pool_offset = pool_offset % _manager->npools; | ||
1020 | list_for_each_entry(p, &_manager->pools, pools) { | ||
1021 | unsigned nr_free; | ||
1022 | |||
1023 | if (!p->dev) | ||
1024 | continue; | ||
1025 | if (shrink_pages == 0) | ||
1026 | break; | ||
1027 | /* Do it in round-robin fashion. */ | ||
1028 | if (++idx < pool_offset) | ||
1029 | continue; | ||
1030 | nr_free = shrink_pages; | ||
1031 | shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free); | ||
1032 | pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n", | ||
1033 | p->pool->dev_name, p->pool->name, current->pid, nr_free, | ||
1034 | shrink_pages); | ||
1035 | } | ||
1036 | mutex_unlock(&_manager->lock); | ||
1037 | /* return estimated number of unused pages in pool */ | ||
1038 | return ttm_dma_pool_get_num_unused_pages(); | ||
1039 | } | ||
1040 | |||
1041 | static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager) | ||
1042 | { | ||
1043 | manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink; | ||
1044 | manager->mm_shrink.seeks = 1; | ||
1045 | register_shrinker(&manager->mm_shrink); | ||
1046 | } | ||
1047 | |||
1048 | static void ttm_dma_pool_mm_shrink_fini(struct ttm_pool_manager *manager) | ||
1049 | { | ||
1050 | unregister_shrinker(&manager->mm_shrink); | ||
1051 | } | ||
1052 | |||
1053 | int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages) | ||
1054 | { | ||
1055 | int ret = -ENOMEM; | ||
1056 | |||
1057 | WARN_ON(_manager); | ||
1058 | |||
1059 | printk(KERN_INFO TTM_PFX "Initializing DMA pool allocator.\n"); | ||
1060 | |||
1061 | _manager = kzalloc(sizeof(*_manager), GFP_KERNEL); | ||
1062 | if (!_manager) | ||
1063 | goto err_manager; | ||
1064 | |||
1065 | mutex_init(&_manager->lock); | ||
1066 | INIT_LIST_HEAD(&_manager->pools); | ||
1067 | |||
1068 | _manager->options.max_size = max_pages; | ||
1069 | _manager->options.small = SMALL_ALLOCATION; | ||
1070 | _manager->options.alloc_size = NUM_PAGES_TO_ALLOC; | ||
1071 | |||
1072 | /* This takes care of auto-freeing the _manager */ | ||
1073 | ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type, | ||
1074 | &glob->kobj, "dma_pool"); | ||
1075 | if (unlikely(ret != 0)) { | ||
1076 | kobject_put(&_manager->kobj); | ||
1077 | goto err; | ||
1078 | } | ||
1079 | ttm_dma_pool_mm_shrink_init(_manager); | ||
1080 | return 0; | ||
1081 | err_manager: | ||
1082 | kfree(_manager); | ||
1083 | _manager = NULL; | ||
1084 | err: | ||
1085 | return ret; | ||
1086 | } | ||
1087 | |||
1088 | void ttm_dma_page_alloc_fini(void) | ||
1089 | { | ||
1090 | struct device_pools *p, *t; | ||
1091 | |||
1092 | printk(KERN_INFO TTM_PFX "Finalizing DMA pool allocator.\n"); | ||
1093 | ttm_dma_pool_mm_shrink_fini(_manager); | ||
1094 | |||
1095 | list_for_each_entry_safe_reverse(p, t, &_manager->pools, pools) { | ||
1096 | dev_dbg(p->dev, "(%s:%d) Freeing.\n", p->pool->name, | ||
1097 | current->pid); | ||
1098 | WARN_ON(devres_destroy(p->dev, ttm_dma_pool_release, | ||
1099 | ttm_dma_pool_match, p->pool)); | ||
1100 | ttm_dma_free_pool(p->dev, p->pool->type); | ||
1101 | } | ||
1102 | kobject_put(&_manager->kobj); | ||
1103 | _manager = NULL; | ||
1104 | } | ||
1105 | |||
1106 | int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) | ||
1107 | { | ||
1108 | struct device_pools *p; | ||
1109 | struct dma_pool *pool = NULL; | ||
1110 | char *h[] = {"pool", "refills", "pages freed", "inuse", "available", | ||
1111 | "name", "virt", "busaddr"}; | ||
1112 | |||
1113 | if (!_manager) { | ||
1114 | seq_printf(m, "No pool allocator running.\n"); | ||
1115 | return 0; | ||
1116 | } | ||
1117 | seq_printf(m, "%13s %12s %13s %8s %8s %8s\n", | ||
1118 | h[0], h[1], h[2], h[3], h[4], h[5]); | ||
1119 | mutex_lock(&_manager->lock); | ||
1120 | list_for_each_entry(p, &_manager->pools, pools) { | ||
1121 | struct device *dev = p->dev; | ||
1122 | if (!dev) | ||
1123 | continue; | ||
1124 | pool = p->pool; | ||
1125 | seq_printf(m, "%13s %12ld %13ld %8d %8d %8s\n", | ||
1126 | pool->name, pool->nrefills, | ||
1127 | pool->nfrees, pool->npages_in_use, | ||
1128 | pool->npages_free, | ||
1129 | pool->dev_name); | ||
1130 | } | ||
1131 | mutex_unlock(&_manager->lock); | ||
1132 | return 0; | ||
1133 | } | ||
1134 | EXPORT_SYMBOL_GPL(ttm_dma_page_alloc_debugfs); | ||