Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_shrinker.c')
 -rw-r--r--  drivers/gpu/drm/i915/i915_gem_shrinker.c | 78
 1 file changed, 41 insertions(+), 37 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem_shrinker.c b/drivers/gpu/drm/i915/i915_gem_shrinker.c
index 6f10b421487b..b80802b35353 100644
--- a/drivers/gpu/drm/i915/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/i915_gem_shrinker.c
@@ -48,19 +48,15 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
 #endif
 }
 
-static int num_vma_bound(struct drm_i915_gem_object *obj)
+static bool any_vma_pinned(struct drm_i915_gem_object *obj)
 {
 	struct i915_vma *vma;
-	int count = 0;
 
-	list_for_each_entry(vma, &obj->vma_list, obj_link) {
-		if (drm_mm_node_allocated(&vma->node))
-			count++;
-		if (vma->pin_count)
-			count++;
-	}
+	list_for_each_entry(vma, &obj->vma_list, obj_link)
+		if (i915_vma_is_pinned(vma))
+			return true;
 
-	return count;
+	return false;
 }
 
 static bool swap_available(void)
@@ -82,7 +78,10 @@ static bool can_release_pages(struct drm_i915_gem_object *obj)
 	 * to the GPU, simply unbinding from the GPU is not going to succeed
 	 * in releasing our pin count on the pages themselves.
 	 */
-	if (obj->pages_pin_count != num_vma_bound(obj))
+	if (obj->pages_pin_count > obj->bind_count)
+		return false;
+
+	if (any_vma_pinned(obj))
 		return false;
 
 	/* We can only return physical pages to the system if we can either
@@ -163,17 +162,16 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 	 */
 	for (phase = phases; phase->list; phase++) {
 		struct list_head still_in_list;
+		struct drm_i915_gem_object *obj;
 
 		if ((flags & phase->bit) == 0)
 			continue;
 
 		INIT_LIST_HEAD(&still_in_list);
-		while (count < target && !list_empty(phase->list)) {
-			struct drm_i915_gem_object *obj;
-			struct i915_vma *vma, *v;
-
-			obj = list_first_entry(phase->list,
-					       typeof(*obj), global_list);
+		while (count < target &&
+		       (obj = list_first_entry_or_null(phase->list,
+						       typeof(*obj),
+						       global_list))) {
 			list_move_tail(&obj->global_list, &still_in_list);
 
 			if (flags & I915_SHRINK_PURGEABLE &&
@@ -184,24 +182,21 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 			    !is_vmalloc_addr(obj->mapping))
 				continue;
 
-			if ((flags & I915_SHRINK_ACTIVE) == 0 && obj->active)
+			if ((flags & I915_SHRINK_ACTIVE) == 0 &&
+			    i915_gem_object_is_active(obj))
 				continue;
 
 			if (!can_release_pages(obj))
 				continue;
 
-			drm_gem_object_reference(&obj->base);
+			i915_gem_object_get(obj);
 
 			/* For the unbound phase, this should be a no-op! */
-			list_for_each_entry_safe(vma, v,
-						 &obj->vma_list, obj_link)
-				if (i915_vma_unbind(vma))
-					break;
-
+			i915_gem_object_unbind(obj);
 			if (i915_gem_object_put_pages(obj) == 0)
 				count += obj->base.size >> PAGE_SHIFT;
 
-			drm_gem_object_unreference(&obj->base);
+			i915_gem_object_put(obj);
 		}
 		list_splice(&still_in_list, phase->list);
 	}
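
The rewritten scan loop above pops each object off the head of the phase list with list_first_entry_or_null() and immediately parks it on the local still_in_list, so every object is visited at most once even when its pages cannot be released; list_splice() then restores the survivors to the phase list. Below is a minimal, generic sketch of that drain pattern; the item type and the try_to_release() helper are illustrative stand-ins, not part of the driver:

#include <linux/list.h>
#include <linux/types.h>

struct item {
	struct list_head link;
};

/* Hypothetical release step, standing in for unbind + put_pages. */
static bool try_to_release(struct item *it)
{
	return false;
}

static unsigned long drain_list(struct list_head *src, unsigned long target)
{
	LIST_HEAD(still_in_list);
	unsigned long count = 0;
	struct item *it;

	while (count < target &&
	       (it = list_first_entry_or_null(src, typeof(*it), link))) {
		/* Park the item so the loop cannot revisit it. */
		list_move_tail(&it->link, &still_in_list);

		if (try_to_release(it))
			count++;
	}

	/* Put everything that was scanned back onto the source list. */
	list_splice(&still_in_list, src);

	return count;
}
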
@@ -210,6 +205,8 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
 		intel_runtime_pm_put(dev_priv);
 
 	i915_gem_retire_requests(dev_priv);
+	/* expedite the RCU grace period to free some request slabs */
+	synchronize_rcu_expedited();
 
 	return count;
 }
@@ -230,10 +227,15 @@ i915_gem_shrink(struct drm_i915_private *dev_priv,
  */
 unsigned long i915_gem_shrink_all(struct drm_i915_private *dev_priv)
 {
-	return i915_gem_shrink(dev_priv, -1UL,
-			       I915_SHRINK_BOUND |
-			       I915_SHRINK_UNBOUND |
-			       I915_SHRINK_ACTIVE);
+	unsigned long freed;
+
+	freed = i915_gem_shrink(dev_priv, -1UL,
+				I915_SHRINK_BOUND |
+				I915_SHRINK_UNBOUND |
+				I915_SHRINK_ACTIVE);
+	rcu_barrier(); /* wait until our RCU delayed slab frees are completed */
+
+	return freed;
 }
 
 static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
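
The two RCU calls added above work as a pair: the shrink path calls synchronize_rcu_expedited() so a grace period completes quickly and RCU-deferred frees (such as the request slabs mentioned in the comment) can proceed, while i915_gem_shrink_all() additionally calls rcu_barrier() to wait for the deferred free callbacks themselves to run before reporting the freed total. A minimal sketch of that generic pattern, using a hypothetical object type freed via kfree_rcu():

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct cached_obj {
	struct rcu_head rcu;
	/* ... payload ... */
};

static void drop_cached_obj(struct cached_obj *obj)
{
	/* Readers may still hold RCU-protected references, so defer the free. */
	kfree_rcu(obj, rcu);
}

static void shrink_caches(void)
{
	/* ... drop references, queueing deferred frees via drop_cached_obj() ... */

	/* Hurry the grace period along so those frees can begin. */
	synchronize_rcu_expedited();
}

static void shrink_everything(void)
{
	shrink_caches();

	/* Wait until the queued RCU callbacks (the kfree_rcu()s) have run. */
	rcu_barrier();
}
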
@@ -242,9 +244,6 @@ static bool i915_gem_shrinker_lock(struct drm_device *dev, bool *unlock)
 		if (!mutex_is_locked_by(&dev->struct_mutex, current))
 			return false;
 
-		if (to_i915(dev)->mm.shrinker_no_lock_stealing)
-			return false;
-
 		*unlock = false;
 	} else
 		*unlock = true;
@@ -273,7 +272,7 @@ i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
 			count += obj->base.size >> PAGE_SHIFT;
 
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
-		if (!obj->active && can_release_pages(obj))
+		if (!i915_gem_object_is_active(obj) && can_release_pages(obj))
 			count += obj->base.size >> PAGE_SHIFT;
 	}
 
@@ -321,17 +320,22 @@ i915_gem_shrinker_lock_uninterruptible(struct drm_i915_private *dev_priv,
 				       struct shrinker_lock_uninterruptible *slu,
 				       int timeout_ms)
 {
-	unsigned long timeout = msecs_to_jiffies(timeout_ms) + 1;
+	unsigned long timeout = jiffies + msecs_to_jiffies_timeout(timeout_ms);
+
+	do {
+		if (i915_gem_wait_for_idle(dev_priv, false) == 0 &&
+		    i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock))
+			break;
 
-	while (!i915_gem_shrinker_lock(&dev_priv->drm, &slu->unlock)) {
 		schedule_timeout_killable(1);
 		if (fatal_signal_pending(current))
 			return false;
-		if (--timeout == 0) {
+
+		if (time_after(jiffies, timeout)) {
 			pr_err("Unable to lock GPU to purge memory.\n");
 			return false;
 		}
-	}
+	} while (1);
 
 	slu->was_interruptible = dev_priv->mm.interruptible;
 	dev_priv->mm.interruptible = false;
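
The retry loop above now measures elapsed time against an absolute jiffies deadline with time_after() instead of decrementing a tick counter, so long waits inside a single iteration (for example waiting for the GPU to idle) still count toward the timeout. A minimal sketch of that deadline idiom, using the generic msecs_to_jiffies() rather than the driver's own helper; retry_until_deadline() and try_once() are illustrative names:

#include <linux/jiffies.h>
#include <linux/delay.h>

/* Retry try_once() until it succeeds or timeout_ms elapses. */
static bool retry_until_deadline(bool (*try_once)(void), int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (try_once())
			return true;

		msleep(1);	/* brief back-off between attempts */
	} while (!time_after(jiffies, deadline));

	return false;
}
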
@@ -410,7 +414,7 @@ i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
 		return NOTIFY_DONE;
 
 	/* Force everything onto the inactive lists */
-	ret = i915_gem_wait_for_idle(dev_priv);
+	ret = i915_gem_wait_for_idle(dev_priv, false);
 	if (ret)
 		goto out;
 