author		Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>	2014-08-03 07:02:31 -0400
committer	Dave Airlie <airlied@redhat.com>			2014-08-04 20:54:19 -0400
commit		a91576d7916f6cce76d30303e60e1ac47cf4a76d (patch)
tree		2f710ba8939b09fe36559a9692f3e9bb39280f23 /drivers
parent		71336e011d1d2312bcbcaa8fcec7365024f3a95d (diff)
drm/ttm: Pass GFP flags in order to avoid deadlock.
Commit 7dc19d5a "drivers: convert shrinkers to new count/scan API" added
deadlock warnings noting that ttm_page_pool_free() and ttm_dma_page_pool_free()
currently do GFP_KERNEL allocations. But these functions were not updated
to receive a gfp_t argument.
This patch explicitly passes sc->gfp_mask or GFP_KERNEL to these functions,
and removes the deadlock warnings.
Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: stable <stable@kernel.org> [2.6.35+]
Signed-off-by: Dave Airlie <airlied@redhat.com>
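
For background on the fix: under the count/scan shrinker API, reclaim hands its
allocation context to the scan callback via struct shrink_control, and any
allocation the callback makes should honour sc->gfp_mask rather than hard-code
GFP_KERNEL, otherwise reclaim can re-enter a context (e.g. GFP_NOFS) that the
original allocation forbade. A minimal sketch of the pattern this patch adopts
(demo_pool_free() and demo_shrink_scan() are illustrative names, not functions
from this patch):

	#include <linux/shrinker.h>
	#include <linux/slab.h>

	/*
	 * Illustrative pool-free helper: it takes the caller's GFP mask
	 * instead of hard-coding GFP_KERNEL, mirroring what this patch does
	 * to ttm_page_pool_free() and ttm_dma_page_pool_free().
	 */
	static unsigned long demo_pool_free(unsigned long nr_free, gfp_t gfp)
	{
		void **scratch;

		/* Allocate bookkeeping memory with the reclaimer's own mask. */
		scratch = kmalloc(nr_free * sizeof(void *), gfp);
		if (!scratch)
			return 0;
		/* ... collect and free up to nr_free objects here ... */
		kfree(scratch);
		return nr_free;	/* number of objects actually freed */
	}

	static unsigned long demo_shrink_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
	{
		/* Forward sc->gfp_mask instead of assuming GFP_KERNEL. */
		return demo_pool_free(sc->nr_to_scan, sc->gfp_mask);
	}

The patched TTM code follows the same shape: the temporary pages_to_free array
is allocated with the caller-supplied mask, the shrink_scan callbacks forward
sc->gfp_mask, and the non-reclaim call sites pass GFP_KERNEL explicitly.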
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc.c		19
-rw-r--r--	drivers/gpu/drm/ttm/ttm_page_alloc_dma.c	19
2 files changed, 19 insertions(+), 19 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index edb83151041f..09874d695188 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
  *
  * @pool: to free the pages from
  * @free_all: If set to true will free all pages in pool
+ * @gfp: GFP flags.
  **/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+			      gfp_t gfp)
 {
 	unsigned long irq_flags;
 	struct page *p;
@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 	if (NUM_PAGES_TO_ALLOC < nr_free)
 		npages_to_free = NUM_PAGES_TO_ALLOC;
 
-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-			GFP_KERNEL);
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
 	if (!pages_to_free) {
 		pr_err("Failed to allocate memory for pool free operation\n");
 		return 0;
@@ -382,9 +383,7 @@ out:
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
- * this can deadlock when called a sc->gfp_mask that is not equal to
- * GFP_KERNEL.
+ * We need to pass sc->gfp_mask to ttm_page_pool_free().
  *
  * This code is crying out for a shrinker per pool....
  */
@@ -408,7 +407,8 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		if (shrink_pages == 0)
 			break;
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
-		shrink_pages = ttm_page_pool_free(pool, nr_free);
+		shrink_pages = ttm_page_pool_free(pool, nr_free,
+						  sc->gfp_mask);
 		freed += nr_free - shrink_pages;
 	}
 	mutex_unlock(&lock);
@@ -710,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 	if (npages)
-		ttm_page_pool_free(pool, npages);
+		ttm_page_pool_free(pool, npages, GFP_KERNEL);
 }
 
 /*
@@ -850,7 +850,8 @@ void ttm_page_alloc_fini(void)
 	ttm_pool_mm_shrink_fini(_manager);
 
 	for (i = 0; i < NUM_POOLS; ++i)
-		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
+				   GFP_KERNEL);
 
 	kobject_put(&_manager->kobj);
 	_manager = NULL;
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 524cc1a2c1fa..ca65df144765 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
  *
  * @pool: to free the pages from
  * @nr_free: If set to true will free all pages in pool
+ * @gfp: GFP flags.
  **/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+				       gfp_t gfp)
 {
 	unsigned long irq_flags;
 	struct dma_page *dma_p, *tmp;
@@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
 			npages_to_free, nr_free);
 	}
 #endif
-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-			GFP_KERNEL);
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
 
 	if (!pages_to_free) {
 		pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
 		if (pool->type != type)
 			continue;
 		/* Takes a spinlock.. */
-		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
 		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
 		/* This code path is called after _all_ references to the
 		 * struct device has been dropped - so nobody should be
@@ -983,7 +984,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
 
 	/* shrink pool if necessary (only on !is_cached pools)*/
 	if (npages)
-		ttm_dma_page_pool_free(pool, npages);
+		ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
 	ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -993,10 +994,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
- * bad.
+ * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
  *
  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
  * shrinkers
@@ -1030,7 +1028,8 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		if (++idx < pool_offset)
 			continue;
 		nr_free = shrink_pages;
-		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
+						      sc->gfp_mask);
 		freed += nr_free - shrink_pages;
 
 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",