author     Daniel Vetter <daniel.vetter@ffwll.ch>    2010-08-07 06:01:21 -0400
committer  Eric Anholt <eric@anholt.net>             2010-08-09 14:24:32 -0400
commit     0108a3edd5c2e3b150a550d565b6aa1a67c0edbe (patch)
tree       95694d7fdef710345c43dd0d2364eee1377e87d2
parent     bf1a10923920f56da23a118de2511a72af341d61 (diff)
drm/i915: prepare for fair lru eviction
This makes two small changes:
- Add an alignment parameter to evict_something. It's not really great to
  whack a carefully sized hole into the gtt with the wrong alignment,
  especially since the fallback path is a full evict.
- With the inactive-list scan we need to evict more than one object, so
  move the unbind call into the helper function that scans for the object
  to be evicted, and adjust its name accordingly.
No functional changes in this patch, just preparation.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Eric Anholt <eric@anholt.net>
-rw-r--r--   drivers/gpu/drm/i915/i915_gem.c   67
1 file changed, 41 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 75e7b899e033..f150bfd2c851 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,6 +35,7 @@
 #include <linux/swap.h>
 #include <linux/pci.h>
 
+static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
 static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -48,7 +49,8 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
                                        unsigned alignment);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev, int min_size);
+static int i915_gem_evict_something(struct drm_device *dev, int min_size,
+                                    unsigned alignment);
 static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                                 struct drm_i915_gem_pwrite *args,
@@ -313,7 +315,8 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
         if (ret == -ENOMEM) {
                 struct drm_device *dev = obj->dev;
 
-                ret = i915_gem_evict_something(dev, obj->size);
+                ret = i915_gem_evict_something(dev, obj->size,
+                                               i915_gem_get_gtt_alignment(obj));
                 if (ret)
                         return ret;
 
@@ -2005,10 +2008,12 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
         return ret;
 }
 
-static struct drm_gem_object *
-i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
+static int
+i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
+                                      unsigned alignment, int *found)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
+        struct drm_gem_object *obj;
         struct drm_i915_gem_object *obj_priv;
         struct drm_gem_object *best = NULL;
         struct drm_gem_object *first = NULL;
@@ -2022,14 +2027,31 @@ i915_gem_find_inactive_object(struct drm_device *dev, int min_size)
                             (!best || obj->size < best->size)) {
                                 best = obj;
                                 if (best->size == min_size)
-                                        return best;
+                                        break;
                         }
                         if (!first)
                                 first = obj;
                 }
         }
 
-        return best ? best : first;
+        obj = best ? best : first;
+
+        if (!obj) {
+                *found = 0;
+                return 0;
+        }
+
+        *found = 1;
+
+#if WATCH_LRU
+        DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+        obj_priv = to_intel_bo(obj);
+        BUG_ON(obj_priv->pin_count != 0);
+        BUG_ON(obj_priv->active);
+
+        /* Wait on the rendering and unbind the buffer. */
+        return i915_gem_object_unbind(obj);
 }
 
 static int
@@ -2115,11 +2137,11 @@ i915_gem_evict_everything(struct drm_device *dev)
 }
 
 static int
-i915_gem_evict_something(struct drm_device *dev, int min_size)
+i915_gem_evict_something(struct drm_device *dev,
+                         int min_size, unsigned alignment)
 {
         drm_i915_private_t *dev_priv = dev->dev_private;
-        struct drm_gem_object *obj;
-        int ret;
+        int ret, found;
 
         struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
         struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
@@ -2129,20 +2151,11 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
                 /* If there's an inactive buffer available now, grab it
                  * and be done.
                  */
-                obj = i915_gem_find_inactive_object(dev, min_size);
-                if (obj) {
-                        struct drm_i915_gem_object *obj_priv;
-
-#if WATCH_LRU
-                        DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-                        obj_priv = to_intel_bo(obj);
-                        BUG_ON(obj_priv->pin_count != 0);
-                        BUG_ON(obj_priv->active);
-
-                        /* Wait on the rendering and unbind the buffer. */
-                        return i915_gem_object_unbind(obj);
-                }
+                ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
+                                                            alignment,
+                                                            &found);
+                if (found)
+                        return ret;
 
                 /* If we didn't get anything, but the ring is still processing
                  * things, wait for the next to finish and hopefully leave us
@@ -2184,6 +2197,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size)
                  * will get moved to inactive.
                  */
                 if (!list_empty(&dev_priv->mm.flushing_list)) {
+                        struct drm_gem_object *obj = NULL;
                         struct drm_i915_gem_object *obj_priv;
 
                         /* Find an object that we can immediately reuse */
@@ -2661,7 +2675,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 #if WATCH_LRU
                 DRM_INFO("%s: GTT full, evicting something\n", __func__);
 #endif
-                ret = i915_gem_evict_something(dev, obj->size);
+                ret = i915_gem_evict_something(dev, obj->size, alignment);
                 if (ret)
                         return ret;
 
@@ -2679,7 +2693,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 
         if (ret == -ENOMEM) {
                 /* first try to clear up some space from the GTT */
-                ret = i915_gem_evict_something(dev, obj->size);
+                ret = i915_gem_evict_something(dev, obj->size,
+                                               alignment);
                 if (ret) {
                         /* now try to shrink everyone else */
                         if (gfpmask) {
@@ -2709,7 +2724,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
         drm_mm_put_block(obj_priv->gtt_space);
         obj_priv->gtt_space = NULL;
 
-        ret = i915_gem_evict_something(dev, obj->size);
+        ret = i915_gem_evict_something(dev, obj->size, alignment);
         if (ret)
                 return ret;
 
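
Editor's note: the heart of this preparation is that the renamed helper now does the whole job in one place: scan the inactive list for the smallest suitable buffer (falling back to the first large-enough one), report through a found flag whether any candidate existed, and perform the unbind itself, returning the unbind status. Below is a minimal, self-contained user-space sketch of that selection-and-evict policy. Every name and type in it (gem_object, unbind, scan_inactive_and_evict) is a hypothetical stand-in rather than a real drm/i915 structure, and the alignment argument is merely passed through, matching the patch's "no functional changes" note.

/*
 * User-space sketch (not kernel code) of the selection-and-evict policy
 * that this patch concentrates in i915_gem_scan_inactive_list_and_evict().
 * All names and types here are illustrative stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct gem_object {
        size_t size;
        int dirty;      /* stand-in for the dirty/purgeable test */
        int bound;
};

/* Stand-in for i915_gem_object_unbind(): returns 0 on success. */
static int unbind(struct gem_object *obj)
{
        obj->bound = 0;
        return 0;
}

/*
 * Pick the smallest clean object of at least min_size, fall back to the
 * first large-enough object, report via *found whether any candidate
 * existed at all, and do the eviction here rather than in the caller.
 * alignment is only plumbed through, mirroring the patch.
 */
static int scan_inactive_and_evict(struct gem_object *list, int n,
                                   size_t min_size, unsigned alignment,
                                   int *found)
{
        struct gem_object *best = NULL, *first = NULL, *obj;
        int i;

        (void)alignment;        /* not yet used for selection */

        for (i = 0; i < n; i++) {
                obj = &list[i];
                if (obj->size < min_size)
                        continue;
                if (!obj->dirty && (!best || obj->size < best->size)) {
                        best = obj;
                        if (best->size == min_size)
                                break;
                }
                if (!first)
                        first = obj;
        }

        obj = best ? best : first;
        if (!obj) {
                *found = 0;
                return 0;
        }
        *found = 1;
        /* The kernel would also wait for rendering before unbinding. */
        return unbind(obj);
}

int main(void)
{
        struct gem_object inactive[] = {
                { .size = 4096,  .dirty = 0, .bound = 1 },
                { .size = 16384, .dirty = 1, .bound = 1 },
                { .size = 8192,  .dirty = 0, .bound = 1 },
        };
        int found = 0;
        int ret = scan_inactive_and_evict(inactive, 3, 8192, 4096, &found);

        /* Expect the clean 8192-byte object (index 2) to be unbound. */
        printf("found=%d ret=%d bound: %d %d %d\n", found, ret,
               inactive[0].bound, inactive[1].bound, inactive[2].bound);
        return 0;
}

Keeping the "was anything there" signal (*found) separate from the eviction status (the return value) is what lets the caller in i915_gem_evict_something() return the unbind result when a candidate existed, yet fall through to the other eviction strategies when the inactive list turned up nothing.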