aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2017-06-09 07:03:46 -0400
committerChris Wilson <chris@chris-wilson.co.uk>2017-06-14 05:51:50 -0400
commit4846bf0ca8cb4304dde6140eff33a92b3fe8ef24 (patch)
tree9158453a02fcefc45345a277e31529e8cba89863
parentda411a48bdeb648153dc9a485c15c18f3d063eac (diff)
drm/i915: Encourage our shrinker more when our shmemfs allocations fail
Commit 24f8e00a8a2e ("drm/i915: Prefer to report ENOMEM rather than incur the oom for gfx allocations") made the bold decision to try and avoid the oomkiller by reporting -ENOMEM to userspace if our allocation failed after attempting to free enough buffer objects. In short, it appears we were giving up too easily (even before we start wondering if one pass of reclaim is as strong as we would like). Part of the problem is that if we only shrink just enough pages for our expected allocation, the likelihood of those pages becoming available to us is less than 100%. To counteract that we ask for twice the number of pages to be made available. Furthermore, we allow the shrinker to pull pages from the active list in later passes. v2: Be a little more cautious in paging out gfx buffers, and leave that to a more balanced approach from shrink_slab(). Important when combined with "drm/i915: Start writeback from the shrinker" as anything shrunk is immediately swapped out and so should be more conservative. Fixes: 24f8e00a8a2e ("drm/i915: Prefer to report ENOMEM rather than incur the oom for gfx allocations") Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Mika Kuoppala <mika.kuoppala@linux.intel.com> Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Cc: Daniel Vetter <daniel.vetter@ffwll.ch> Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20170609110350.1767-1-chris@chris-wilson.co.uk
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c50
1 file changed, 29 insertions, 21 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aff449807399..ca61a0be1458 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2337,8 +2337,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
2337 struct page *page; 2337 struct page *page;
2338 unsigned long last_pfn = 0; /* suppress gcc warning */ 2338 unsigned long last_pfn = 0; /* suppress gcc warning */
2339 unsigned int max_segment; 2339 unsigned int max_segment;
2340 gfp_t noreclaim;
2340 int ret; 2341 int ret;
2341 gfp_t gfp;
2342 2342
2343 /* Assert that the object is not currently in any GPU domain. As it 2343 /* Assert that the object is not currently in any GPU domain. As it
2344 * wasn't in the GTT, there shouldn't be any way it could have been in 2344 * wasn't in the GTT, there shouldn't be any way it could have been in
@@ -2367,22 +2367,31 @@ rebuild_st:
2367 * Fail silently without starting the shrinker 2367 * Fail silently without starting the shrinker
2368 */ 2368 */
2369 mapping = obj->base.filp->f_mapping; 2369 mapping = obj->base.filp->f_mapping;
2370 gfp = mapping_gfp_constraint(mapping, ~(__GFP_IO | __GFP_RECLAIM)); 2370 noreclaim = mapping_gfp_constraint(mapping,
2371 gfp |= __GFP_NORETRY | __GFP_NOWARN; 2371 ~(__GFP_IO | __GFP_RECLAIM));
2372 noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
2373
2372 sg = st->sgl; 2374 sg = st->sgl;
2373 st->nents = 0; 2375 st->nents = 0;
2374 for (i = 0; i < page_count; i++) { 2376 for (i = 0; i < page_count; i++) {
2375 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2377 const unsigned int shrink[] = {
2376 if (unlikely(IS_ERR(page))) { 2378 I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
2377 i915_gem_shrink(dev_priv, 2379 0,
2378 page_count, 2380 }, *s = shrink;
2379 I915_SHRINK_BOUND | 2381 gfp_t gfp = noreclaim;
2380 I915_SHRINK_UNBOUND | 2382
2381 I915_SHRINK_PURGEABLE); 2383 do {
2382 page = shmem_read_mapping_page_gfp(mapping, i, gfp); 2384 page = shmem_read_mapping_page_gfp(mapping, i, gfp);
2383 } 2385 if (likely(!IS_ERR(page)))
2384 if (unlikely(IS_ERR(page))) { 2386 break;
2385 gfp_t reclaim; 2387
2388 if (!*s) {
2389 ret = PTR_ERR(page);
2390 goto err_sg;
2391 }
2392
2393 i915_gem_shrink(dev_priv, 2 * page_count, *s++);
2394 cond_resched();
2386 2395
2387 /* We've tried hard to allocate the memory by reaping 2396 /* We've tried hard to allocate the memory by reaping
2388 * our own buffer, now let the real VM do its job and 2397 * our own buffer, now let the real VM do its job and
@@ -2392,15 +2401,13 @@ rebuild_st:
2392 * defer the oom here by reporting the ENOMEM back 2401 * defer the oom here by reporting the ENOMEM back
2393 * to userspace. 2402 * to userspace.
2394 */ 2403 */
2395 reclaim = mapping_gfp_mask(mapping); 2404 if (!*s) {
2396 reclaim |= __GFP_NORETRY; /* reclaim, but no oom */ 2405 /* reclaim and warn, but no oom */
2397 2406 gfp = mapping_gfp_mask(mapping);
2398 page = shmem_read_mapping_page_gfp(mapping, i, reclaim); 2407 gfp |= __GFP_NORETRY;
2399 if (IS_ERR(page)) {
2400 ret = PTR_ERR(page);
2401 goto err_sg;
2402 } 2408 }
2403 } 2409 } while (1);
2410
2404 if (!i || 2411 if (!i ||
2405 sg->length >= max_segment || 2412 sg->length >= max_segment ||
2406 page_to_pfn(page) != last_pfn + 1) { 2413 page_to_pfn(page) != last_pfn + 1) {
@@ -4285,6 +4292,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
4285 4292
4286 mapping = obj->base.filp->f_mapping; 4293 mapping = obj->base.filp->f_mapping;
4287 mapping_set_gfp_mask(mapping, mask); 4294 mapping_set_gfp_mask(mapping, mask);
4295 GEM_BUG_ON(!(mapping_gfp_mask(mapping) & __GFP_RECLAIM));
4288 4296
4289 i915_gem_object_init(obj, &i915_gem_object_ops); 4297 i915_gem_object_init(obj, &i915_gem_object_ops);
4290 4298