authorLinus Torvalds <torvalds@linux-foundation.org>2014-08-07 20:36:12 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-08-07 20:36:12 -0400
commita7d7a143d0b4cb1914705884ca5c25e322dba693 (patch)
tree0ee5e9e43f0863b38a29e8abc293e80eab177d74 /drivers/gpu/drm/ttm
parent43c40df2c7fedce640a6c39fcdf58764f6bbac5c (diff)
parent7963e9db1b1f842fdc53309baa8714d38e9f5681 (diff)
Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
Pull DRM updates from Dave Airlie:
 "Like all good pull reqs this ends with a revert, so it must mean we
  tested it,

  [ Ed. That's _one_ way of looking at it ]

  This pull is missing nouveau, Ben has been stuck trying to track down
  a very longstanding bug that revealed itself due to some other
  changes.  I've asked him to send you a direct pull request for
  nouveau once he cleans things up.  I'm away until Monday so don't
  want to delay things, you can make a decision on that when he sends
  it, I have my phone so I can ack things just not really merge much.

  It has one trivial conflict with your tree in armada_drv.c, and also
  the pull request contains some component changes that are already in
  your tree, the base tree from Russell went via Greg's tree already,
  but some stuff still shows up in here that doesn't when I merge my
  tree into yours.

  Otherwise all pretty standard graphics fare, one new driver and
  changes all over the place.

  New drivers:
   - sti kms driver for STMicroelectronics chipsets stih416 and stih407

  core:
   - lots of cleanups to the drm core
   - DP MST helper code merged
   - universal cursor planes
   - render nodes enabled by default

  panel:
   - better panel interfaces
   - new panel support
   - non-continuous clock advertising ability

  ttm:
   - shrinker fixes

  i915:
   - hopefully ditched UMS support
   - runtime pm fixes
   - psr tracking and locking - now enabled by default
   - userptr fixes
   - backlight brightness fixes
   - MST support merged
   - runtime PM for dpms
   - primary planes locking fixes
   - gen8 hw semaphore support
   - fbc fixes
   - runtime PM on S0ix sleep state hw
   - mmio base page flipping
   - lots of vlv/chv fixes
   - universal cursor planes

  radeon:
   - Hawaii fixes
   - display scaler support for non-fixed mode displays
   - new firmware format support
   - dpm on more asics by default
   - GPUVM improvements
   - uncached and wc GTT buffers
   - BOs > visible VRAM

  exynos:
   - i80 interface support
   - module auto-loading
   - ipp driver consolidated

  armada:
   - irq handling in crtc layer only
   - crtc renumbering
   - add component support
   - DT interaction changes

  tegra:
   - load as module fixes
   - eDP bpp and sync polarity fixed
   - DSI non-continuous clock mode support
   - better support for importing buffers from nouveau

  msm:
   - mdp5/apq8084 v1.3 hw enablement
   - devicetree clk changes
   - ifc6410 board working

  tda998x:
   - component support
   - DT documentation update

  vmwgfx:
   - fix compat shader namespace"

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (551 commits)
  Revert "drm: drop redundant drm_file->is_master"
  drm/panel: simple: Use devm_gpiod_get_optional()
  drm/dsi: Replace upcasting macro by function
  drm/panel: ld9040: Replace upcasting macro by function
  drm/exynos: dp: Modify driver to support drm_panel
  drm/exynos: Move DP setup into commit()
  drm/panel: simple: Add AUO B133HTN01 panel support
  drm/panel: simple: Support delays in panel functions
  drm/panel: simple: Add proper definition for prepare and unprepare
  drm/panel: s6e8aa0: Add proper definition for prepare and unprepare
  drm/panel: ld9040: Add proper definition for prepare and unprepare
  drm/tegra: Add support for panel prepare and unprepare routines
  drm/exynos: dsi: Add support for panel prepare and unprepare routines
  drm/exynos: dpi: Add support for panel prepare and unprepare routines
  drm/panel: simple: Add dummy prepare and unprepare routines
  drm/panel: s6e8aa0: Add dummy prepare and unprepare routines
  drm/panel: ld9040: Add dummy prepare and unprepare routines
  drm/panel: Provide convenience wrapper for .get_modes()
  drm/panel: add .prepare() and .unprepare() functions
  drm/panel: simple: Remove simple-panel compatible
  ...
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c             | 20
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_manager.c     |  3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c        |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c         |  2
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c     | 31
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 34
6 files changed, 51 insertions(+), 41 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4ab9f7171c4f..3da89d5dab60 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -412,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;

 	spin_lock(&glob->lru_lock);
-	ret = __ttm_bo_reserve(bo, false, true, false, 0);
+	ret = __ttm_bo_reserve(bo, false, true, false, NULL);

 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
@@ -514,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		return ret;

 	spin_lock(&glob->lru_lock);
-	ret = __ttm_bo_reserve(bo, false, true, false, 0);
+	ret = __ttm_bo_reserve(bo, false, true, false, NULL);

 	/*
 	 * We raced, and lost, someone else holds the reservation now,
@@ -577,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}

-		ret = __ttm_bo_reserve(entry, false, true, false, 0);
+		ret = __ttm_bo_reserve(entry, false, true, false, NULL);
 		if (remove_all && ret) {
 			spin_unlock(&glob->lru_lock);
 			ret = __ttm_bo_reserve(entry, false, false,
-					       false, 0);
+					       false, NULL);
 			spin_lock(&glob->lru_lock);
 		}

@@ -726,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,

 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = __ttm_bo_reserve(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 		if (!ret)
 			break;
 	}
@@ -784,7 +784,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
 	int ret;

 	do {
-		ret = (*man->func->get_node)(man, bo, placement, mem);
+		ret = (*man->func->get_node)(man, bo, placement, 0, mem);
 		if (unlikely(ret != 0))
 			return ret;
 		if (mem->mm_node)
@@ -897,7 +897,8 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,

 	if (man->has_type && man->use_type) {
 		type_found = true;
-		ret = (*man->func->get_node)(man, bo, placement, mem);
+		ret = (*man->func->get_node)(man, bo, placement,
+					     cur_flags, mem);
 		if (unlikely(ret))
 			return ret;
 	}
@@ -937,7 +938,6 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
 				~TTM_PL_MASK_MEMTYPE);

-
 		if (mem_type == TTM_PL_SYSTEM) {
 			mem->mem_type = mem_type;
 			mem->placement = cur_flags;
@@ -1595,7 +1595,7 @@ int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
 	 * Using ttm_bo_reserve makes sure the lru lists are updated.
 	 */

-	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
+	ret = ttm_bo_reserve(bo, true, no_wait, false, NULL);
 	if (unlikely(ret != 0))
 		return ret;
 	spin_lock(&bdev->fence_lock);
@@ -1630,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)

 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = __ttm_bo_reserve(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, NULL);
 		if (!ret)
 			break;
 	}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_manager.c b/drivers/gpu/drm/ttm/ttm_bo_manager.c
index bd850c9f4bca..9e103a4875c8 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_manager.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_manager.c
@@ -50,6 +50,7 @@ struct ttm_range_manager {
 static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 			       struct ttm_buffer_object *bo,
 			       struct ttm_placement *placement,
+			       uint32_t flags,
 			       struct ttm_mem_reg *mem)
 {
 	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;
@@ -67,7 +68,7 @@ static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man,
 	if (!node)
 		return -ENOMEM;

-	if (bo->mem.placement & TTM_PL_FLAG_TOPDOWN)
+	if (flags & TTM_PL_FLAG_TOPDOWN)
 		aflags = DRM_MM_CREATE_TOP;

 	spin_lock(&rman->lock);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 1df856f78568..30e5d90cb7bc 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -500,7 +500,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 		pgprot_val(tmp) |= _PAGE_GUARDED;
 	}
 #endif
-#if defined(__ia64__)
+#if defined(__ia64__) || defined(__arm__)
 	if (caching_flags & TTM_PL_FLAG_WC)
 		tmp = pgprot_writecombine(tmp);
 	else
diff --git a/drivers/gpu/drm/ttm/ttm_module.c b/drivers/gpu/drm/ttm/ttm_module.c
index d7f92fe9d904..66fc6395eb54 100644
--- a/drivers/gpu/drm/ttm/ttm_module.c
+++ b/drivers/gpu/drm/ttm/ttm_module.c
@@ -35,7 +35,7 @@
 #include <drm/drm_sysfs.h>

 static DECLARE_WAIT_QUEUE_HEAD(exit_q);
-atomic_t device_released;
+static atomic_t device_released;

 static struct device_type ttm_drm_class_type = {
 	.name = "ttm",
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 863bef9f9234..09874d695188 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -297,8 +297,10 @@ static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
  *
  * @pool: to free the pages from
  * @free_all: If set to true will free all pages in pool
+ * @gfp: GFP flags.
  **/
-static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
+static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free,
+			      gfp_t gfp)
 {
 	unsigned long irq_flags;
 	struct page *p;
@@ -309,8 +311,7 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
 	if (NUM_PAGES_TO_ALLOC < nr_free)
 		npages_to_free = NUM_PAGES_TO_ALLOC;

-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-			GFP_KERNEL);
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);
 	if (!pages_to_free) {
 		pr_err("Failed to allocate memory for pool free operation\n");
 		return 0;
@@ -382,32 +383,35 @@ out:
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * ttm_page_pool_free() does memory allocation using GFP_KERNEL. that means
- * this can deadlock when called a sc->gfp_mask that is not equal to
- * GFP_KERNEL.
+ * We need to pass sc->gfp_mask to ttm_page_pool_free().
  *
  * This code is crying out for a shrinker per pool....
  */
 static unsigned long
 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	static atomic_t start_pool = ATOMIC_INIT(0);
+	static DEFINE_MUTEX(lock);
+	static unsigned start_pool;
 	unsigned i;
-	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	unsigned pool_offset;
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
 	unsigned long freed = 0;

-	pool_offset = pool_offset % NUM_POOLS;
+	if (!mutex_trylock(&lock))
+		return SHRINK_STOP;
+	pool_offset = ++start_pool % NUM_POOLS;
 	/* select start pool in round robin fashion */
 	for (i = 0; i < NUM_POOLS; ++i) {
 		unsigned nr_free = shrink_pages;
 		if (shrink_pages == 0)
 			break;
 		pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
-		shrink_pages = ttm_page_pool_free(pool, nr_free);
+		shrink_pages = ttm_page_pool_free(pool, nr_free,
+						  sc->gfp_mask);
 		freed += nr_free - shrink_pages;
 	}
+	mutex_unlock(&lock);
 	return freed;
 }

413 417
@@ -706,7 +710,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags,
 	}
 	spin_unlock_irqrestore(&pool->lock, irq_flags);
 	if (npages)
-		ttm_page_pool_free(pool, npages);
+		ttm_page_pool_free(pool, npages, GFP_KERNEL);
 }

 /*
@@ -790,7 +794,7 @@ static int ttm_get_pages(struct page **pages, unsigned npages, int flags,
 	return 0;
 }

-static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
+static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, gfp_t flags,
 		char *name)
 {
 	spin_lock_init(&pool->lock);
@@ -846,7 +850,8 @@ void ttm_page_alloc_fini(void)
 	ttm_pool_mm_shrink_fini(_manager);

 	for (i = 0; i < NUM_POOLS; ++i)
-		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
+		ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES,
+				   GFP_KERNEL);

 	kobject_put(&_manager->kobj);
 	_manager = NULL;
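
Note: the shrinker hunks above and below all follow one pattern — never block in the reclaim callbacks, bail out with SHRINK_STOP when the pool lock is already held, and thread the caller's sc->gfp_mask into any allocation made while freeing. The sketch below is a minimal illustration of that pattern against the 3.16-era struct shrinker interface, not the TTM code itself; the example_* names and the bare page counter are hypothetical stand-ins.

#include <linux/mutex.h>
#include <linux/shrinker.h>

static DEFINE_MUTEX(example_pool_lock);
static unsigned long example_pool_pages;

static unsigned long
example_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;

	/* Never sleep in the count callback; report 0 if contended. */
	if (!mutex_trylock(&example_pool_lock))
		return 0;
	count = example_pool_pages;
	mutex_unlock(&example_pool_lock);
	return count;
}

static unsigned long
example_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed = 0;

	/* Bail out instead of deadlocking against a concurrent scan. */
	if (!mutex_trylock(&example_pool_lock))
		return SHRINK_STOP;

	/*
	 * If this path had to allocate scratch memory (as
	 * ttm_page_pool_free() does for its pages_to_free array), it
	 * would have to use sc->gfp_mask rather than GFP_KERNEL.
	 */
	while (freed < sc->nr_to_scan && example_pool_pages) {
		example_pool_pages--;
		freed++;
	}

	mutex_unlock(&example_pool_lock);
	return freed;
}

static struct shrinker example_shrinker = {
	.count_objects = example_shrink_count,
	.scan_objects  = example_shrink_scan,
	.seeks         = DEFAULT_SEEKS,
};

/* register_shrinker(&example_shrinker) at init, unregister_shrinker() at exit. */
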
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index fb8259f69839..ca65df144765 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -411,8 +411,10 @@ static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
  *
  * @pool: to free the pages from
  * @nr_free: If set to true will free all pages in pool
+ * @gfp: GFP flags.
  **/
-static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
+static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free,
+				       gfp_t gfp)
 {
 	unsigned long irq_flags;
 	struct dma_page *dma_p, *tmp;
@@ -430,8 +432,7 @@ static unsigned ttm_dma_page_pool_free(struct dma_pool *pool, unsigned nr_free)
 			npages_to_free, nr_free);
 	}
 #endif
-	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
-			GFP_KERNEL);
+	pages_to_free = kmalloc(npages_to_free * sizeof(struct page *), gfp);

 	if (!pages_to_free) {
 		pr_err("%s: Failed to allocate memory for pool free operation\n",
@@ -530,7 +531,7 @@ static void ttm_dma_free_pool(struct device *dev, enum pool_type type)
 		if (pool->type != type)
 			continue;
 		/* Takes a spinlock.. */
-		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES);
+		ttm_dma_page_pool_free(pool, FREE_ALL_PAGES, GFP_KERNEL);
 		WARN_ON(((pool->npages_in_use + pool->npages_free) != 0));
 		/* This code path is called after _all_ references to the
 		 * struct device has been dropped - so nobody should be
@@ -983,7 +984,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)

 	/* shrink pool if necessary (only on !is_cached pools)*/
 	if (npages)
-		ttm_dma_page_pool_free(pool, npages);
+		ttm_dma_page_pool_free(pool, npages, GFP_KERNEL);
 	ttm->state = tt_unpopulated;
 }
 EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
@@ -993,10 +994,7 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
  *
  * XXX: (dchinner) Deadlock warning!
  *
- * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
- * needs to be paid to sc->gfp_mask to determine if this can be done or not.
- * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context woul dbe really
- * bad.
+ * We need to pass sc->gfp_mask to ttm_dma_page_pool_free().
  *
  * I'm getting sadder as I hear more pathetical whimpers about needing per-pool
  * shrinkers
@@ -1004,9 +1002,9 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
 static unsigned long
 ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	static atomic_t start_pool = ATOMIC_INIT(0);
+	static unsigned start_pool;
 	unsigned idx = 0;
-	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	unsigned pool_offset;
 	unsigned shrink_pages = sc->nr_to_scan;
 	struct device_pools *p;
 	unsigned long freed = 0;
@@ -1014,8 +1012,11 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 	if (list_empty(&_manager->pools))
 		return SHRINK_STOP;

-	mutex_lock(&_manager->lock);
-	pool_offset = pool_offset % _manager->npools;
+	if (!mutex_trylock(&_manager->lock))
+		return SHRINK_STOP;
+	if (!_manager->npools)
+		goto out;
+	pool_offset = ++start_pool % _manager->npools;
 	list_for_each_entry(p, &_manager->pools, pools) {
 		unsigned nr_free;

1020 unsigned nr_free; 1021 unsigned nr_free;
1021 1022
@@ -1027,13 +1028,15 @@ ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		if (++idx < pool_offset)
 			continue;
 		nr_free = shrink_pages;
-		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+		shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free,
+						      sc->gfp_mask);
 		freed += nr_free - shrink_pages;

 		pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
 			 p->pool->dev_name, p->pool->name, current->pid,
 			 nr_free, shrink_pages);
 	}
+out:
 	mutex_unlock(&_manager->lock);
 	return freed;
 }
@@ -1044,7 +1047,8 @@ ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
 	struct device_pools *p;
 	unsigned long count = 0;

-	mutex_lock(&_manager->lock);
+	if (!mutex_trylock(&_manager->lock))
+		return 0;
 	list_for_each_entry(p, &_manager->pools, pools)
 		count += p->pool->npages_free;
 	mutex_unlock(&_manager->lock);