 drivers/gpu/drm/i915/i915_gem.c | 45
 1 file changed, 45 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 47c46ed384f1..3471dece13e7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5066,6 +5066,20 @@ void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
 }
 
 static int
+i915_gpu_is_active(struct drm_device *dev)
+{
+        drm_i915_private_t *dev_priv = dev->dev_private;
+        int lists_empty;
+
+        spin_lock(&dev_priv->mm.active_list_lock);
+        lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+                      list_empty(&dev_priv->mm.active_list);
+        spin_unlock(&dev_priv->mm.active_list_lock);
+
+        return !lists_empty;
+}
+
+static int
 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 {
         drm_i915_private_t *dev_priv, *next_dev;
@@ -5094,6 +5108,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 
         spin_lock(&shrink_list_lock);
 
+rescan:
         /* first scan for clean buffers */
         list_for_each_entry_safe(dev_priv, next_dev,
                                  &shrink_list, mm.shrink_list) {
@@ -5151,6 +5166,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
                 would_deadlock = 0;
         }
 
+        if (nr_to_scan) {
+                int active = 0;
+
+                /*
+                 * We are desperate for pages, so as a last resort, wait
+                 * for the GPU to finish and discard whatever we can.
+                 * This has a dramatic impact to reduce the number of
+                 * OOM-killer events whilst running the GPU aggressively.
+                 */
+                list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+                        struct drm_device *dev = dev_priv->dev;
+
+                        if (!mutex_trylock(&dev->struct_mutex))
+                                continue;
+
+                        spin_unlock(&shrink_list_lock);
+
+                        if (i915_gpu_is_active(dev)) {
+                                i915_gpu_idle(dev);
+                                active++;
+                        }
+
+                        spin_lock(&shrink_list_lock);
+                        mutex_unlock(&dev->struct_mutex);
+                }
+
+                if (active)
+                        goto rescan;
+        }
+
         spin_unlock(&shrink_list_lock);
 
         if (would_deadlock)
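
Note: i915_gem_shrink() is the driver's memory shrinker, called from the kernel's page-reclaim path when the system is short on pages; the new last-resort branch only runs when nr_to_scan is still non-zero after the clean and dirty passes, and the rescan label lets those passes retry once i915_gpu_idle() has retired the active buffers. For context, a minimal sketch of how a callback with this nr_to_scan/gfp_mask signature was hooked into reclaim with the shrinker API of that era follows; the i915_gem_shrinker_init()/exit() wrapper names and the variable name are illustrative assumptions, not quoted from this patch.

/*
 * Illustrative sketch, not part of the patch above: registering a
 * shrink callback with the old-style (pre-shrink_control) shrinker API.
 */
#include <linux/mm.h>

/* Defined earlier in i915_gem.c; this is the function shown in the diff. */
static int i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask);

static struct shrinker i915_gem_shrinker = {
        .shrink = i915_gem_shrink,   /* invoked by reclaim with a scan budget */
        .seeks  = DEFAULT_SEEKS,     /* relative cost of recreating freed objects */
};

void i915_gem_shrinker_init(void)    /* hypothetical hook on driver load */
{
        register_shrinker(&i915_gem_shrinker);
}

void i915_gem_shrinker_exit(void)    /* hypothetical hook on driver unload */
{
        unregister_shrinker(&i915_gem_shrinker);
}

With this API, reclaim first calls the callback with nr_to_scan == 0 to query how many objects could be freed, then again with a non-zero budget to actually free them, which is why a leftover nr_to_scan at the end of the function indicates the earlier scans could not satisfy the request.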
