From 71336e011d1d2312bcbcaa8fcec7365024f3a95d Mon Sep 17 00:00:00 2001
From: Tetsuo Handa
Date: Sun, 3 Aug 2014 20:02:03 +0900
Subject: [PATCH] drm/ttm: Fix possible stack overflow by recursive shrinker
 calls.

While ttm_dma_pool_shrink_scan() tries to take a mutex before doing a
GFP_KERNEL allocation, ttm_pool_shrink_scan() does not. This can result
in a stack overflow if kmalloc() in ttm_page_pool_free() triggers
recursion due to memory pressure.

  shrink_slab()
  => ttm_pool_shrink_scan()
     => ttm_page_pool_free()
        => kmalloc(GFP_KERNEL)
           => shrink_slab()
              => ttm_pool_shrink_scan()
                 => ttm_page_pool_free()
                    => kmalloc(GFP_KERNEL)

Change ttm_pool_shrink_scan() to do what ttm_dma_pool_shrink_scan()
does.

Signed-off-by: Tetsuo Handa
Cc: stable [2.6.35+]
Signed-off-by: Dave Airlie
---
 drivers/gpu/drm/ttm/ttm_page_alloc.c | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index beb8e75a3f00..edb83151041f 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -391,14 +391,17 @@ out:
 static unsigned long
 ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 {
-	static atomic_t start_pool = ATOMIC_INIT(0);
+	static DEFINE_MUTEX(lock);
+	static unsigned start_pool;
 	unsigned i;
-	unsigned pool_offset = atomic_add_return(1, &start_pool);
+	unsigned pool_offset;
 	struct ttm_page_pool *pool;
 	int shrink_pages = sc->nr_to_scan;
 	unsigned long freed = 0;
 
-	pool_offset = pool_offset % NUM_POOLS;
+	if (!mutex_trylock(&lock))
+		return SHRINK_STOP;
+	pool_offset = ++start_pool % NUM_POOLS;
 	/* select start pool in round robin fashion */
 	for (i = 0; i < NUM_POOLS; ++i) {
 		unsigned nr_free = shrink_pages;
@@ -408,6 +411,7 @@ ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
 		shrink_pages = ttm_page_pool_free(pool, nr_free);
 		freed += nr_free - shrink_pages;
 	}
+	mutex_unlock(&lock);
 	return freed;
 }
 
-- 
2.39.2
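
A side note for readers who want to see the pattern outside the kernel
tree: the fix above boils down to "take the lock with a trylock and back
off if it is already held", so the scan can never end up running inside
itself. Below is a minimal user-space C sketch of that idea using
pthread_mutex_trylock(). The names fake_shrink_scan(), pool_free(),
NUM_POOLS and SHRINK_STOP here are stand-ins invented for illustration,
not the actual TTM or shrinker API.

/*
 * User-space analogue of the pattern used in the patch above: a reclaim
 * callback that refuses to run re-entrantly by using a trylock instead
 * of a blocking lock.  All names are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

#define NUM_POOLS	4
#define SHRINK_STOP	(~0UL)	/* mirrors the kernel's "back off" value */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int start_pool;

/* Stand-in for ttm_page_pool_free(): pretend to free up to nr pages and
 * return how many still need to be freed (0 = nothing left to do). */
static unsigned long pool_free(unsigned int pool, unsigned long nr)
{
	printf("freeing %lu pages from pool %u\n", nr, pool);
	return 0;
}

static unsigned long fake_shrink_scan(unsigned long nr_to_scan)
{
	unsigned long freed = 0;
	unsigned int i, pool_offset;

	/*
	 * If the lock is already held we are either racing with another
	 * reclaimer or being called recursively from an allocation made
	 * under the lock; bail out instead of blocking or recursing.
	 */
	if (pthread_mutex_trylock(&lock) != 0)
		return SHRINK_STOP;

	pool_offset = ++start_pool % NUM_POOLS;	/* round-robin start pool */
	for (i = 0; i < NUM_POOLS; ++i) {
		unsigned long nr_free = nr_to_scan;

		nr_to_scan = pool_free((i + pool_offset) % NUM_POOLS, nr_free);
		freed += nr_free - nr_to_scan;
		if (!nr_to_scan)
			break;
	}

	pthread_mutex_unlock(&lock);
	return freed;
}

int main(void)
{
	printf("freed %lu pages\n", fake_shrink_scan(16));
	return 0;
}

The essential point mirrors the patch: a blocking lock taken on the same
thread that already holds it would self-deadlock, whereas the trylock
turns re-entry into a harmless early return (SHRINK_STOP in the kernel
case, which tells the caller to stop scanning this shrinker).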