about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2018-01-21 12:31:43 -0500
committerRodrigo Vivi <rodrigo.vivi@intel.com>2018-02-01 10:32:46 -0500
commit124804c4c4b2e84e956cedd17878562e230a907f (patch)
treeac7d8efe51b78395d3cc4bb059c0d34102fde8b6
parentc5bd1fc9a6c843c85a5cea5765cdc997f832df3c (diff)
drm/i915: Protect WC stash allocation against direct reclaim
As we attempt to allocate pages for use in a new WC stash, direct reclaim may run underneath us and fill up the WC stash. We have to be careful then not to overflow the pvec. Fixes: 66df1014efba ("drm/i915: Keep a small stash of preallocated WC pages") Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=103109 Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Matthew Auld <matthew.auld@intel.com> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Reviewed-by: Matthew Auld <matthew.auld@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20180121173143.17090-1-chris@chris-wilson.co.uk (cherry picked from commit 073cd7816685ac77c6d8b4d321a5586c9177b76a) Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c32
1 file changed, 25 insertions(+), 7 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 0de4f3f13fc4..133cb9590003 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -377,6 +377,7 @@ static gen6_pte_t iris_pte_encode(dma_addr_t addr,
377static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp) 377static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
378{ 378{
379 struct pagevec *pvec = &vm->free_pages; 379 struct pagevec *pvec = &vm->free_pages;
380 struct pagevec stash;
380 381
381 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1))) 382 if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
382 i915_gem_shrink_all(vm->i915); 383 i915_gem_shrink_all(vm->i915);
@@ -395,7 +396,15 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
395 if (likely(pvec->nr)) 396 if (likely(pvec->nr))
396 return pvec->pages[--pvec->nr]; 397 return pvec->pages[--pvec->nr];
397 398
398 /* Otherwise batch allocate pages to amoritize cost of set_pages_wc. */ 399 /*
400 * Otherwise batch allocate pages to amoritize cost of set_pages_wc.
401 *
402 * We have to be careful as page allocation may trigger the shrinker
403 * (via direct reclaim) which will fill up the WC stash underneath us.
404 * So we add our WB pages into a temporary pvec on the stack and merge
405 * them into the WC stash after all the allocations are complete.
406 */
407 pagevec_init(&stash);
399 do { 408 do {
400 struct page *page; 409 struct page *page;
401 410
@@ -403,15 +412,24 @@ static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
403 if (unlikely(!page)) 412 if (unlikely(!page))
404 break; 413 break;
405 414
406 pvec->pages[pvec->nr++] = page; 415 stash.pages[stash.nr++] = page;
407 } while (pagevec_space(pvec)); 416 } while (stash.nr < pagevec_space(pvec));
408 417
409 if (unlikely(!pvec->nr)) 418 if (stash.nr) {
410 return NULL; 419 int nr = min_t(int, stash.nr, pagevec_space(pvec));
420 struct page **pages = stash.pages + stash.nr - nr;
411 421
412 set_pages_array_wc(pvec->pages, pvec->nr); 422 if (nr && !set_pages_array_wc(pages, nr)) {
423 memcpy(pvec->pages + pvec->nr,
424 pages, sizeof(pages[0]) * nr);
425 pvec->nr += nr;
426 stash.nr -= nr;
427 }
428
429 pagevec_release(&stash);
430 }
413 431
414 return pvec->pages[--pvec->nr]; 432 return likely(pvec->nr) ? pvec->pages[--pvec->nr] : NULL;
415} 433}
416 434
417static void vm_free_pages_release(struct i915_address_space *vm, 435static void vm_free_pages_release(struct i915_address_space *vm,