author     Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>    2014-08-03 07:02:03 -0400
committer  Dave Airlie <airlied@redhat.com>                     2014-08-04 20:54:10 -0400
commit     71336e011d1d2312bcbcaa8fcec7365024f3a95d
tree       3e4a416ed2730c64b5fbb4975776d6f0b7041903 /drivers/gpu/drm/ttm
parent     22e71691fd54c637800d10816bbeba9cf132d218
drm/ttm: Fix possible stack overflow by recursive shrinker calls.
While ttm_dma_pool_shrink_scan() takes a mutex before doing a GFP_KERNEL allocation, ttm_pool_shrink_scan() does not. This can result in a stack overflow if kmalloc() in ttm_page_pool_free() triggers recursion due to memory pressure:

  shrink_slab()
  => ttm_pool_shrink_scan()
     => ttm_page_pool_free()
        => kmalloc(GFP_KERNEL)
           => shrink_slab()
              => ttm_pool_shrink_scan()
                 => ttm_page_pool_free()
                    => kmalloc(GFP_KERNEL)

Change ttm_pool_shrink_scan() to do what ttm_dma_pool_shrink_scan() does.

Signed-off-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: stable <stable@kernel.org> [2.6.35+]
Signed-off-by: Dave Airlie <airlied@redhat.com>
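For illustration only (not part of the commit), a minimal sketch of the trylock guard the fix adopts is shown below. The lock and function names are placeholders; only the mutex_trylock()/mutex_unlock() pair and the SHRINK_STOP return mirror the actual change to ttm_pool_shrink_scan().

#include <linux/mutex.h>
#include <linux/shrinker.h>

/* Hypothetical shrinker scan callback demonstrating the guard pattern:
 * if the lock is already held (possibly by this same path further up
 * the stack via a recursive shrink_slab() call), give up with
 * SHRINK_STOP instead of recursing deeper.
 */
static DEFINE_MUTEX(example_shrink_lock);

static unsigned long
example_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long freed = 0;

	if (!mutex_trylock(&example_shrink_lock))
		return SHRINK_STOP;

	/* ... free up to sc->nr_to_scan objects here, updating 'freed',
	 * even if that path may allocate with GFP_KERNEL ...
	 */

	mutex_unlock(&example_shrink_lock);
	return freed;
}

A GFP_KERNEL allocation made while the guard is held can still re-enter shrink_slab(), but the recursive scan callback then fails the trylock and returns immediately, bounding the stack depth.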
Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c  10
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index beb8e75a3f00..edb83151041f 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -391,14 +391,17 @@ out:
 static unsigned long
 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	static atomic_t start_pool = ATOMIC_INIT(0);
+	static DEFINE_MUTEX(lock);
+	static unsigned start_pool;
 	unsigned i;
-	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	unsigned pool_offset;
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
 	unsigned long freed = 0;
 
-	pool_offset = pool_offset % NUM_POOLS;
+	if (!mutex_trylock(&lock))
+		return SHRINK_STOP;
+	pool_offset = ++start_pool % NUM_POOLS;
 	/* select start pool in round robin fashion */
 	for (i = 0; i < NUM_POOLS; ++i) {
 		unsigned nr_free = shrink_pages;
@@ -408,6 +411,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		shrink_pages = ttm_page_pool_free(pool, nr_free);
 		freed += nr_free - shrink_pages;
 	}
+	mutex_unlock(&lock);
 	return freed;
 }
 