Diffstat (limited to 'drivers/gpu/drm')
 drivers/gpu/drm/ttm/ttm_page_alloc.c     | 26
 drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 25
 2 files changed, 30 insertions(+), 21 deletions(-)
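Summary of the change, as read from the hunks below: the gfp_t argument of ttm_page_pool_free() and ttm_dma_page_pool_free() is replaced with a bool use_static. Callers that are serialized by a global mutex (the pool shrinkers, and the teardown paths where the mutex is no longer used) pass true and reuse one static page-pointer array, so these paths no longer call kmalloc() while the kernel may be reclaiming memory; unserialized callers pass false and keep the GFP_KERNEL allocation. A standalone sketch of this pattern follows the diff.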
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 09874d695188..025c429050c0 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,11 +297,12 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
  *
  * @pool: to free the pages from
  * @free_all: If set to true will free all pages in pool
- * @gfp: GFP flags.
+ * @use_static: Safe to use static buffer
  **/
 static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
-			      gfp_t gfp)
+			      bool use_static)
 {
+	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
 	unsigned long irq_flags;
 	struct page *p;
 	struct page **pages_to_free;
@@ -311,7 +312,11 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
 	if (NUM_PAGES_TO_ALLOC < nr_free)
 		npages_to_free = NUM_PAGES_TO_ALLOC;
 
-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+	if (use_static)
+		pages_to_free = static_buf;
+	else
+		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+					GFP_KERNEL);
 	if (!pages_to_free) {
 		pr_err("Failed to allocate memory for pool free operation\n");
 		return 0;
@@ -374,7 +379,8 @@ restart:
 	if (freed_pages)
 		ttm_pages_put(pages_to_free, freed_pages);
 out:
-	kfree(pages_to_free);
+	if (pages_to_free != static_buf)
+		kfree(pages_to_free);
 	return nr_free;
 }
 
@@ -383,8 +389,6 @@ out:
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * We need to pass sc->gfp_mask to ttm_page_pool_free().
- *
  * This code is crying out for a shrinker per pool....
  */
 static unsigned long
@@ -407,8 +411,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		if (shrink_pages == 0)
 			break;
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
-		shrink_pages = ttm_page_pool_free(pool, nr_free,
-						  sc->gfp_mask);
+		/* OK to use static buffer since global mutex is held. */
+		shrink_pages = ttm_page_pool_free(pool, nr_free, true);
 		freed += nr_free - shrink_pages;
 	}
 	mutex_unlock(&lock);
@@ -710,7 +714,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 	if (npages)
-		ttm_page_pool_free(pool, npages, GFP_KERNEL);
+		ttm_page_pool_free(pool, npages, false);
 }
 
 /*
@@ -849,9 +853,9 @@ void ttm_page_alloc_fini(void)
 	pr_info("Finalizing pool allocator\n");
 	ttm_pool_mm_shrink_fini(_manager);
 
+	/* OK to use static buffer since global mutex is no longer used. */
 	for (i = 0; i < NUM_POOLS; ++i)
-		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
-				   GFP_KERNEL);
+		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES, true);
 
 	kobject_put(&_manager->kobj);
 	_manager = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index c96db433f8af..01e1d27eb078 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,11 +411,12 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
  *
  * @pool: to free the pages from
  * @nr_free: If set to true will free all pages in pool
- * @gfp: GFP flags.
+ * @use_static: Safe to use static buffer
  **/
 static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
-				       gfp_t gfp)
+				       bool use_static)
 {
+	static struct page *static_buf[NUM_PAGES_TO_ALLOC];
 	unsigned long irq_flags;
 	struct dma_page *dma_p, *tmp;
 	struct page **pages_to_free;
@@ -432,7 +433,11 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
 			npages_to_free, nr_free);
 	}
 #endif
-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
+	if (use_static)
+		pages_to_free = static_buf;
+	else
+		pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
+					GFP_KERNEL);
 
 	if (!pages_to_free) {
 		pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -502,7 +507,8 @@ restart:
 	if (freed_pages)
 		ttm_dma_pages_put(pool, &d_pages, pages_to_free, freed_pages);
 out:
-	kfree(pages_to_free);
+	if (pages_to_free != static_buf)
+		kfree(pages_to_free);
 	return nr_free;
 }
 
@@ -531,7 +537,8 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
 		if (pool->type != type)
 			continue;
 		/* Takes a spinlock.. */
-		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
+		/* OK to use static buffer since global mutex is held. */
+		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, true);
 		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
 		/* This code path is called after _all_ references to the
 		 * struct device has been dropped - so nobody should be
@@ -986,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 
 	/* shrink pool if necessary (only on !is_cached pools)*/
 	if (npages)
-		ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
+		ttm_dma_page_pool_free(pool, npages, false);
 	ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -996,8 +1003,6 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
- *
  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
  * shrinkers
  */
@@ -1030,8 +1035,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		if (++idx < pool_offset)
 			continue;
 		nr_free = shrink_pages;
-		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
-						      sc->gfp_mask);
+		/* OK to use static buffer since global mutex is held. */
+		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free, true);
 		freed += nr_free - shrink_pages;
 
 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
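
For illustration, here is a minimal user-space sketch of the pattern both files now use. All names (demo_pool_free, MAX_BATCH) are hypothetical, not kernel APIs; this is a sketch of the technique, not the kernel code. The rule it demonstrates: the static buffer may only be used by callers that hold the single lock serializing them, since a second concurrent user would overwrite it.

/* Sketch of the "static buffer when serialized, kmalloc otherwise"
 * pattern from the patch above. Hypothetical names throughout. */
#include <stdbool.h>
#include <stdlib.h>

#define MAX_BATCH 64

/* Frees up to nr_free items in batches. use_static may only be true
 * while the caller holds the lock that serializes such callers;
 * otherwise two callers would clobber the shared static array. */
static int demo_pool_free(int nr_free, bool use_static)
{
	static void *static_buf[MAX_BATCH];	/* shared, lock-protected */
	void **pages_to_free;

	if (use_static)
		pages_to_free = static_buf;	/* no allocation: safe under reclaim */
	else
		pages_to_free = malloc(MAX_BATCH * sizeof(void *));
	if (!pages_to_free)
		return nr_free;			/* could not free anything */

	/* ... collect up to MAX_BATCH items into pages_to_free[] and
	 * release them, repeating until nr_free is exhausted ... */

	if (pages_to_free != static_buf)
		free(pages_to_free);
	return 0;				/* everything requested was freed */
}

int main(void)
{
	demo_pool_free(16, false);	/* normal path: may allocate */
	demo_pool_free(16, true);	/* shrinker-like path: must not */
	return 0;
}

The shrinker case is the one that matters: a shrinker runs precisely when the kernel is short on memory, so allocating a scratch buffer there (what the old sc->gfp_mask plumbing existed to make safe) is best avoided entirely. That is also why both removed XXX comments about passing sc->gfp_mask became obsolete.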