author     Chris Wilson <chris@chris-wilson.co.uk>    2015-07-27 05:26:26 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2015-07-28 10:05:41 -0400
commit     6c24695988774d56ea8d6a47ffdaf0fd32072488 (patch)
tree       a74aa6124c15ffc6113087446c0ee527aa3f02e9  /drivers/gpu/drm/i915/i915_gem.c
parent     3b9a02e844948fc14cb32a06bc00e0e61bde3577 (diff)
drm/i915: Keep the mm.bound_list in rough LRU order
When we shrink our working sets, we want to avoid stealing pages from objects that are likely to be reused in the near future. We first look at inactive objects before processing active objects - but what about a recently active object that is about to be used again? That object's position in the bound_list is ordered by the time of binding, not the time of last use, so the most recently used inactive object could well be at the head of the shrink list. To compensate, give the object a bump to MRU when it becomes inactive (thus moving it to the end of the first pass over the shrink lists).

Conversely, bumping on inactive makes bumping on active useless, since when we do have to reap from the active working set, everything is going to become inactive very quickly and the order is pretty much random - just hope for the best at that point, as once we start stalling on active objects, we can hope that the rebinding neatly orders vital objects.

Suggested-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
[danvet: Resolve merge conflict.]
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 7 +++++++
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5d685789b1f9..84f91bcc12f7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2385,6 +2385,13 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
 	if (obj->active)
 		return;
 
+	/* Bump our place on the bound list to keep it roughly in LRU order
+	 * so that we don't steal from recently used but inactive objects
+	 * (unless we are forced to ofc!)
+	 */
+	list_move_tail(&obj->global_list,
+		       &to_i915(obj->base.dev)->mm.bound_list);
+
 	list_for_each_entry(vma, &obj->vma_list, vma_link) {
 		if (!list_empty(&vma->mm_list))
 			list_move_tail(&vma->mm_list, &vma->vm->inactive_list);
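For illustration only, here is a minimal userspace sketch of the bump-to-MRU idea (this is not the i915 code; the list helpers and names below are hypothetical stand-ins for the kernel's <linux/list.h> API): when an object retires to the inactive state it is moved to the tail of the bound list, so a shrinker scanning from the head encounters the least recently used objects first.

/*
 * Minimal userspace sketch of keeping a bound list in rough LRU order:
 * when an object retires (becomes inactive) it is bumped to the tail,
 * so a shrinker scanning from the head sees least recently used first.
 * The list helpers are simplified stand-ins for <linux/list.h>.
 */
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

/* Like the kernel's list_move_tail(): unlink, then re-add at the tail. */
static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add_tail(e, h);
}

struct obj {
	struct list_head link;	/* first member, so we can cast back */
	const char *name;
};

/* Called when an object transitions from active to inactive. */
static void retire_to_inactive(struct obj *o, struct list_head *bound_list)
{
	/* Bump to MRU; the shrinker steals from the head (LRU end) first. */
	list_move_tail(&o->link, bound_list);
}

int main(void)
{
	struct list_head bound_list;
	struct obj a = { .name = "a" }, b = { .name = "b" }, c = { .name = "c" };
	struct list_head *pos;

	list_init(&bound_list);

	/* Bind order: a, b, c. */
	list_add_tail(&a.link, &bound_list);
	list_add_tail(&b.link, &bound_list);
	list_add_tail(&c.link, &bound_list);

	/* "a" was just in use and now retires: it moves behind b and c. */
	retire_to_inactive(&a, &bound_list);

	/* Shrinker scan order is now b, c, a. */
	for (pos = bound_list.next; pos != &bound_list; pos = pos->next)
		printf("%s\n", ((struct obj *)pos)->name);

	return 0;
}

Run, the sketch prints b, c, a: the just-retired object "a" has moved behind the other bound objects, mirroring how the hunk above bumps obj->global_list to the tail of mm.bound_list in i915_gem_object_retire__read().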