author    | Chris Wilson <chris@chris-wilson.co.uk> | 2010-04-20 12:10:35 -0400
committer | Eric Anholt <eric@anholt.net>           | 2010-05-07 16:59:28 -0400
commit    | 1637ef413b9a5d1c14eb370f7029a5558f3bb3d3 (patch)
tree      | 5305336bef61e0fe40b4578425609bac4f18bf96 /drivers/gpu
parent    | 0a31a448659d48cbc38f5e7520d8a65f8f1f8276 (diff)
drm/i915: Wait for the GPU whilst shrinking, if truly desperate.
By idling the GPU and discarding everything we can when under extreme
memory pressure, the number of OOM-killer events is dramatically
reduced. For instance, this makes it possible to run
firefox-planet-gnome.trace again on my swapless 512MiB i915.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Eric Anholt <eric@anholt.net>
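
The patch below implements this as a last-resort pass inside the i915 shrinker: if, after discarding every clean buffer, the scan target still has not been met, it waits for the GPU to go idle (which retires active buffers so they become discardable) and then jumps back to rescan them. Here is a minimal, self-contained sketch of that control flow; the names (`discard_clean`, `gpu_idle`, `gpu_is_active`) and the page counts are hypothetical stand-ins, not the driver's API:

```c
#include <stdio.h>

/* Toy model: some pages are reclaimable now, others are pinned by
 * in-flight GPU work and only become reclaimable once the GPU idles. */
static int clean_pages  = 10;
static int active_pages = 30;

static int gpu_is_active(void) { return active_pages > 0; }

static void gpu_idle(void)
{
	/* Waiting for the GPU retires active buffers onto the clean list. */
	clean_pages += active_pages;
	active_pages = 0;
}

static int discard_clean(int nr)
{
	int freed = nr < clean_pages ? nr : clean_pages;

	clean_pages -= freed;
	return freed;
}

/* The shape of the patch: scan, and only if still desperate,
 * idle the GPU and take one more pass over the clean buffers. */
static int shrink(int nr_to_scan)
{
rescan:
	nr_to_scan -= discard_clean(nr_to_scan);
	if (nr_to_scan > 0 && gpu_is_active()) {
		gpu_idle();
		goto rescan;
	}
	return nr_to_scan;
}

int main(void)
{
	/* 10 clean pages cannot satisfy a 25-page request; idling frees
	 * the other 30, so the rescan succeeds: prints 0. */
	printf("pages left unreclaimed: %d\n", shrink(25));
	return 0;
}
```

The real shrinker does the same thing across its per-device buffer lists, as the hunks below show.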
Diffstat (limited to 'drivers/gpu')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 45
1 file changed, 45 insertions(+), 0 deletions(-)
```diff
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 47c46ed384f1..3471dece13e7 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -5066,6 +5066,20 @@ void i915_gem_release(struct drm_device *dev, struct drm_file *file_priv)
 }
 
 static int
+i915_gpu_is_active(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int lists_empty;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
+		      list_empty(&dev_priv->mm.active_list);
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	return !lists_empty;
+}
+
+static int
 i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 {
 	drm_i915_private_t *dev_priv, *next_dev;
```
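
The new `i915_gpu_is_active()` helper treats the GPU as busy while either `mm.active_list` (buffers referenced by requests still executing) or `mm.flushing_list` (buffers with writes not yet flushed) is non-empty, and it samples both under `active_list_lock` so the answer is a consistent snapshot. A userspace analogue of the same check, assuming BSD `sys/queue.h` lists and a pthread mutex in place of the kernel primitives:

```c
#include <pthread.h>
#include <stdbool.h>
#include <sys/queue.h>

struct buffer { LIST_ENTRY(buffer) link; };
LIST_HEAD(buflist, buffer);

struct gpu_mm {
	pthread_mutex_t active_list_lock;
	struct buflist active_list;    /* requests still executing */
	struct buflist flushing_list;  /* writes not yet flushed   */
};

static bool gpu_is_active(struct gpu_mm *mm)
{
	bool lists_empty;

	/* Check both lists under one lock: a consistent snapshot. */
	pthread_mutex_lock(&mm->active_list_lock);
	lists_empty = LIST_EMPTY(&mm->active_list) &&
		      LIST_EMPTY(&mm->flushing_list);
	pthread_mutex_unlock(&mm->active_list_lock);

	return !lists_empty;
}

int main(void)
{
	struct gpu_mm mm = { .active_list_lock = PTHREAD_MUTEX_INITIALIZER };

	LIST_INIT(&mm.active_list);
	LIST_INIT(&mm.flushing_list);
	return gpu_is_active(&mm);   /* 0: both lists empty, GPU idle */
}
```

The next hunk adds the `rescan` label that the last-resort pass jumps back to.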
```diff
@@ -5094,6 +5108,7 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 
 	spin_lock(&shrink_list_lock);
 
+rescan:
 	/* first scan for clean buffers */
 	list_for_each_entry_safe(dev_priv, next_dev,
 				 &shrink_list, mm.shrink_list) {
```
```diff
@@ -5151,6 +5166,36 @@ i915_gem_shrink(int nr_to_scan, gfp_t gfp_mask)
 		would_deadlock = 0;
 	}
 
+	if (nr_to_scan) {
+		int active = 0;
+
+		/*
+		 * We are desperate for pages, so as a last resort, wait
+		 * for the GPU to finish and discard whatever we can.
+		 * This has a dramatic impact to reduce the number of
+		 * OOM-killer events whilst running the GPU aggressively.
+		 */
+		list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
+			struct drm_device *dev = dev_priv->dev;
+
+			if (!mutex_trylock(&dev->struct_mutex))
+				continue;
+
+			spin_unlock(&shrink_list_lock);
+
+			if (i915_gpu_is_active(dev)) {
+				i915_gpu_idle(dev);
+				active++;
+			}
+
+			spin_lock(&shrink_list_lock);
+			mutex_unlock(&dev->struct_mutex);
+		}
+
+		if (active)
+			goto rescan;
+	}
+
 	spin_unlock(&shrink_list_lock);
 
 	if (would_deadlock)
```
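
Two locking details in the last hunk are worth noting. `struct_mutex` is only ever taken with `mutex_trylock()`: the shrinker can run on behalf of an allocation made while that mutex is already held, and blocking there would deadlock (the same reason the surrounding code tracks `would_deadlock`). And `shrink_list_lock` is dropped across `i915_gpu_idle()`, since idling can take a long time and the global list lock must not be held for the duration. A pthread analogue of this dance, with assumed names rather than the kernel API:

```c
#include <pthread.h>

static pthread_mutex_t shrink_list_lock = PTHREAD_MUTEX_INITIALIZER;

struct device {
	pthread_mutex_t struct_mutex;
	int active;
};

static void gpu_idle(struct device *dev)
{
	dev->active = 0;             /* stand-in for the (slow) GPU wait */
}

/* Called with shrink_list_lock held; holds it again on return.
 * Returns 1 if the device was idled and a rescan is worthwhile. */
static int idle_if_possible(struct device *dev)
{
	int idled = 0;

	if (pthread_mutex_trylock(&dev->struct_mutex) != 0)
		return 0;            /* contended: skip rather than deadlock */

	/* Drop the global lock across the potentially long wait. */
	pthread_mutex_unlock(&shrink_list_lock);
	if (dev->active) {
		gpu_idle(dev);
		idled = 1;
	}
	pthread_mutex_lock(&shrink_list_lock);
	pthread_mutex_unlock(&dev->struct_mutex);

	return idled;
}

int main(void)
{
	struct device dev = { PTHREAD_MUTEX_INITIALIZER, 1 };
	int idled;

	pthread_mutex_lock(&shrink_list_lock);
	idled = idle_if_possible(&dev);
	pthread_mutex_unlock(&shrink_list_lock);
	return !idled;               /* 0: the busy device was idled */
}
```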