author		Chris Wilson <chris@chris-wilson.co.uk>	2015-12-04 10:58:54 -0500
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2016-01-05 05:05:38 -0500
commit		c1a415e261aad096b3458ba9157fefd123aa7cbf (patch)
tree		18e477c44837e73befe65a37a4356aaf00f17e13 /drivers/gpu/drm/i915/i915_gem_shrinker.c
parent		fb0fec501f08a0a83af7a2b25888ec8cebab53b0 (diff)
drm/i915: Disable shrinker for non-swapped backed objects
If the system has no available swap pages, we cannot make forward
progress in the shrinker by releasing active pages, only by releasing
purgeable pages which are immediately reaped. Take total_swap_pages into
account when counting up available objects to be shrunk and subsequently
shrinking them. By doing so, we avoid unbinding objects that cannot be
shrunk and so wasting CPU cycles flushing those objects from the GPU to
the system and then immediately back again (as they will more than
likely be reused shortly after).

Based on a patch by Akash Goel.

v2: frontswap registers extra swap pages available for the system, so it
is already included in the count of available swap pages.

v3: Use get_nr_swap_pages() to query the currently available amount of
swap space. This should also stop us from shrinking the GPU buffers if
we ever run out of swap space. Though at that point, we would expect the
oom-notifier to be running and failing miserably...

Reported-by: Akash Goel <akash.goel@intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: linux-mm@kvack.org
Cc: Akash Goel <akash.goel@intel.com>
Cc: sourab.gupta@intel.com
Link: http://patchwork.freedesktop.org/patch/msgid/1449244734-25733-2-git-send-email-chris@chris-wilson.co.uk
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
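To make the gating concrete, here is a minimal userspace sketch of the decision this patch adds. It is only a sketch: struct gem_object, its vma_bound field, and the nr_swap_pages variable are hypothetical stand-ins for the kernel's drm_i915_gem_object state, num_vma_bound() and get_nr_swap_pages(); the real change is the diff below.

/* Sketch of the shrinker gating logic; all types here are illustrative
 * stand-ins, not the real i915 structures. */
#include <stdbool.h>
#include <stdio.h>

enum madv { MADV_WILLNEED, MADV_DONTNEED };

struct gem_object {
	int pages_pin_count;	/* pins held on the backing pages */
	int vma_bound;		/* pins attributable to GPU bindings */
	enum madv madv;		/* purgeable if MADV_DONTNEED */
};

static long nr_swap_pages;	/* stand-in for get_nr_swap_pages() */

static bool swap_available(void)
{
	return nr_swap_pages > 0;
}

/* Mirrors can_release_pages(): unbinding only helps if GPU bindings are
 * the sole reason the pages are pinned, and the freed pages must be
 * either discardable (purgeable) or swappable to make real progress. */
static bool can_release_pages(const struct gem_object *obj)
{
	if (obj->pages_pin_count != obj->vma_bound)
		return false;

	return swap_available() || obj->madv == MADV_DONTNEED;
}

int main(void)
{
	struct gem_object active = { 1, 1, MADV_WILLNEED };
	struct gem_object purgeable = { 1, 1, MADV_DONTNEED };

	/* With no swap, only the purgeable object is worth shrinking. */
	printf("no swap:  active=%d purgeable=%d\n",
	       can_release_pages(&active), can_release_pages(&purgeable));

	nr_swap_pages = 1024;
	printf("has swap: active=%d purgeable=%d\n",
	       can_release_pages(&active), can_release_pages(&purgeable));
	return 0;
}

With no swap available, only the purgeable object reports true, which is exactly why the shrinker now skips objects it could only shuffle between the GPU and system memory.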
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_shrinker.c')
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_shrinker.c	|	60
1 file changed, 44 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index f7df54a8ee2b..16da9c1422cc 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -47,6 +47,46 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
+static int num_vma_bound(struct drm_i915_gem_object *obj)
+{
+	struct i915_vma *vma;
+	int count = 0;
+
+	list_for_each_entry(vma, &obj->vma_list, vma_link) {
+		if (drm_mm_node_allocated(&vma->node))
+			count++;
+		if (vma->pin_count)
+			count++;
+	}
+
+	return count;
+}
+
+static bool swap_available(void)
+{
+	return get_nr_swap_pages() > 0;
+}
+
+static bool can_release_pages(struct drm_i915_gem_object *obj)
+{
+	/* Only report true if by unbinding the object and putting its pages
+	 * we can actually make forward progress towards freeing physical
+	 * pages.
+	 *
+	 * If the pages are pinned for any other reason than being bound
+	 * to the GPU, simply unbinding from the GPU is not going to succeed
+	 * in releasing our pin count on the pages themselves.
+	 */
+	if (obj->pages_pin_count != num_vma_bound(obj))
+		return false;
+
+	/* We can only return physical pages to the system if we can either
+	 * discard the contents (because the user has marked them as being
+	 * purgeable) or if we can move their contents out to swap.
+	 */
+	return swap_available() || obj->madv == I915_MADV_DONTNEED;
+}
+
 /**
  * i915_gem_shrink - Shrink buffer object caches
  * @dev_priv: i915 device
@@ -129,6 +169,9 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
 				continue;
 
+			if (!can_release_pages(obj))
+				continue;
+
 			drm_gem_object_reference(&obj->base);
 
 			/* For the unbound phase, this should be a no-op! */
@@ -188,21 +231,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 	return true;
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
-{
-	struct i915_vma *vma;
-	int count = 0;
-
-	list_for_each_entry(vma, &obj->vma_list, vma_link) {
-		if (drm_mm_node_allocated(&vma->node))
-			count++;
-		if (vma->pin_count)
-			count++;
-	}
-
-	return count;
-}
-
 static unsigned long
 i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 {
@@ -222,7 +250,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->active && obj->pages_pin_count == num_vma_bound(obj))
+		if (!obj->active && can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 