author	Chris Wilson <chris@chris-wilson.co.uk>	2010-08-07 06:01:23 -0400
committer	Eric Anholt <eric@anholt.net>	2010-08-09 14:24:32 -0400
commit	b47eb4a2b302f33adaed2a27d2b3bfc74fe35ac5
tree	cf99ac4cf8f54ec8f0a9c99fd821af041c32087a
parent	6f392d548658a17600da7faaf8a5df25ee5f01f6
drm/i915: Move the eviction logic to its own file.
The eviction code is the gnarly underbelly of memory management, and is
clearer if kept separated from the normal domain management in GEM.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Eric Anholt <eric@anholt.net>
 drivers/gpu/drm/i915/Makefile         |   1 +
 drivers/gpu/drm/i915/i915_drv.h       |   6 +
 drivers/gpu/drm/i915/i915_gem.c       | 231 +-----------------
 drivers/gpu/drm/i915/i915_gem_evict.c | 260 ++++++++++++++++++++
 4 files changed, 269 insertions(+), 229 deletions(-)
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index da78f2c0d909..384fd4535796 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -8,6 +8,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  i915_suspend.o \
 	  i915_gem.o \
 	  i915_gem_debug.o \
+	  i915_gem_evict.o \
 	  i915_gem_tiling.o \
 	  i915_trace_points.o \
 	  intel_display.o \
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index def6ee0a3524..12c8f47f984b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -982,6 +982,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
+int i915_gpu_idle(struct drm_device *dev);
 int i915_gem_idle(struct drm_device *dev);
 uint32_t i915_add_request(struct drm_device *dev,
 			  struct drm_file *file_priv,
@@ -1007,6 +1008,11 @@ int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
 
+/* i915_gem_evict.c */
+int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
+int i915_gem_evict_everything(struct drm_device *dev);
+int i915_gem_evict_inactive(struct drm_device *dev);
+
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
 void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
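These prototypes are now the driver-internal eviction API. Their primary consumer is the GTT bind path in i915_gem.c, which calls i915_gem_evict_something() when the GTT allocator has no free block of the right size, then retries the allocation. A minimal userspace model of that evict-and-retry contract (the names and state below are illustrative stand-ins, not driver API):

#include <stdio.h>

static int gtt_free = 2;		/* stand-in for free GTT space */

/* Models i915_gem_evict_something(): makes room (or would report -ENOSPC). */
static int evict_something(int min_size)
{
	gtt_free += min_size;		/* pretend we unbound a buffer */
	return 0;
}

/* Models the retry loop in the bind path: allocate, evict on failure. */
static int bind_to_gtt(int size)
{
	while (gtt_free < size) {
		int ret = evict_something(size);
		if (ret)
			return ret;	/* nothing evictable: give up */
	}
	gtt_free -= size;
	return 0;
}

int main(void)
{
	printf("bind_to_gtt(4) = %d\n", bind_to_gtt(4)); /* forces one eviction */
	return 0;
}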
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 45b998218d0c..b5a7b00264a6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -49,9 +49,6 @@ static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
 static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
 					   unsigned alignment);
 static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_evict_something(struct drm_device *dev, int min_size,
-				    unsigned alignment);
-static int i915_gem_evict_from_inactive_list(struct drm_device *dev);
 static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 				struct drm_i915_gem_pwrite *args,
 				struct drm_file *file_priv);
@@ -1885,19 +1882,6 @@ i915_gem_flush(struct drm_device *dev,
 			flush_domains);
 }
 
-static void
-i915_gem_flush_ring(struct drm_device *dev,
-	       uint32_t invalidate_domains,
-	       uint32_t flush_domains,
-	       struct intel_ring_buffer *ring)
-{
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		drm_agp_chipset_flush(dev);
-	ring->flush(dev, ring,
-			invalidate_domains,
-			flush_domains);
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
@@ -2008,53 +1992,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	return ret;
 }
 
-static int
-i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
-				      unsigned alignment, int *found)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *best = NULL;
-	struct drm_gem_object *first = NULL;
-
-	/* Try to find the smallest clean object */
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		struct drm_gem_object *obj = &obj_priv->base;
-		if (obj->size >= min_size) {
-			if ((!obj_priv->dirty ||
-			     i915_gem_object_is_purgeable(obj_priv)) &&
-			    (!best || obj->size < best->size)) {
-				best = obj;
-				if (best->size == min_size)
-					break;
-			}
-			if (!first)
-				first = obj;
-		}
-	}
-
-	obj = best ? best : first;
-
-	if (!obj) {
-		*found = 0;
-		return 0;
-	}
-
-	*found = 1;
-
-#if WATCH_LRU
-	DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-	obj_priv = to_intel_bo(obj);
-	BUG_ON(obj_priv->pin_count != 0);
-	BUG_ON(obj_priv->active);
-
-	/* Wait on the rendering and unbind the buffer. */
-	return i915_gem_object_unbind(obj);
-}
-
-static int
+int
 i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -2095,147 +2033,6 @@ i915_gpu_idle(struct drm_device *dev)
 	return ret;
 }
 
-static int
-i915_gem_evict_everything(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret;
-	bool lists_empty;
-
-	spin_lock(&dev_priv->mm.active_list_lock);
-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       (!HAS_BSD(dev)
-			|| list_empty(&dev_priv->bsd_ring.active_list)));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
-	if (lists_empty)
-		return -ENOSPC;
-
-	/* Flush everything (on to the inactive lists) and evict */
-	ret = i915_gpu_idle(dev);
-	if (ret)
-		return ret;
-
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
-	ret = i915_gem_evict_from_inactive_list(dev);
-	if (ret)
-		return ret;
-
-	spin_lock(&dev_priv->mm.active_list_lock);
-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->render_ring.active_list) &&
-		       (!HAS_BSD(dev)
-			|| list_empty(&dev_priv->bsd_ring.active_list)));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-	BUG_ON(!lists_empty);
-
-	return 0;
-}
-
-static int
-i915_gem_evict_something(struct drm_device *dev,
-			 int min_size, unsigned alignment)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret, found;
-
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
-	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
-	for (;;) {
-		i915_gem_retire_requests(dev);
-
-		/* If there's an inactive buffer available now, grab it
-		 * and be done.
-		 */
-		ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
-							    alignment,
-							    &found);
-		if (found)
-			return ret;
-
-		/* If we didn't get anything, but the ring is still processing
-		 * things, wait for the next to finish and hopefully leave us
-		 * a buffer to evict.
-		 */
-		if (!list_empty(&render_ring->request_list)) {
-			struct drm_i915_gem_request *request;
-
-			request = list_first_entry(&render_ring->request_list,
-						   struct drm_i915_gem_request,
-						   list);
-
-			ret = i915_wait_request(dev,
-					request->seqno, request->ring);
-			if (ret)
-				return ret;
-
-			continue;
-		}
-
-		if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
-			struct drm_i915_gem_request *request;
-
-			request = list_first_entry(&bsd_ring->request_list,
-						   struct drm_i915_gem_request,
-						   list);
-
-			ret = i915_wait_request(dev,
-					request->seqno, request->ring);
-			if (ret)
-				return ret;
-
-			continue;
-		}
-
-		/* If we didn't have anything on the request list but there
-		 * are buffers awaiting a flush, emit one and try again.
-		 * When we wait on it, those buffers waiting for that flush
-		 * will get moved to inactive.
-		 */
-		if (!list_empty(&dev_priv->mm.flushing_list)) {
-			struct drm_gem_object *obj = NULL;
-			struct drm_i915_gem_object *obj_priv;
-
-			/* Find an object that we can immediately reuse */
-			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-				obj = &obj_priv->base;
-				if (obj->size >= min_size)
-					break;
-
-				obj = NULL;
-			}
-
-			if (obj != NULL) {
-				uint32_t seqno;
-
-				i915_gem_flush_ring(dev,
-					       obj->write_domain,
-					       obj->write_domain,
-					       obj_priv->ring);
-				seqno = i915_add_request(dev, NULL,
-						obj->write_domain,
-						obj_priv->ring);
-				if (seqno == 0)
-					return -ENOMEM;
-				continue;
-			}
-		}
-
-		/* If we didn't do any of the above, there's no single buffer
-		 * large enough to swap out for the new one, so just evict
-		 * everything and start again. (This should be rare.)
-		 */
-		if (!list_empty (&dev_priv->mm.inactive_list))
-			return i915_gem_evict_from_inactive_list(dev);
-		else
-			return i915_gem_evict_everything(dev);
-	}
-}
-
 int
 i915_gem_object_get_pages(struct drm_gem_object *obj,
 			  gfp_t gfpmask)
@@ -4548,30 +4345,6 @@ void i915_gem_free_object(struct drm_gem_object *obj)
 	i915_gem_free_object_tail(obj);
 }
 
-/** Unbinds all inactive objects. */
-static int
-i915_gem_evict_from_inactive_list(struct drm_device *dev)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	while (!list_empty(&dev_priv->mm.inactive_list)) {
-		struct drm_gem_object *obj;
-		int ret;
-
-		obj = &list_first_entry(&dev_priv->mm.inactive_list,
-					struct drm_i915_gem_object,
-					list)->base;
-
-		ret = i915_gem_object_unbind(obj);
-		if (ret != 0) {
-			DRM_ERROR("Error unbinding object: %d\n", ret);
-			return ret;
-		}
-	}
-
-	return 0;
-}
-
 int
 i915_gem_idle(struct drm_device *dev)
 {
@@ -4596,7 +4369,7 @@ i915_gem_idle(struct drm_device *dev)
 
 	/* Under UMS, be paranoid and evict. */
 	if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_gem_evict_from_inactive_list(dev);
+		ret = i915_gem_evict_inactive(dev);
 		if (ret) {
 			mutex_unlock(&dev->struct_mutex);
 			return ret;
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
new file mode 100644
index 000000000000..479e450f931b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -0,0 +1,260 @@
+/*
+ * Copyright © 2008-2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drv.h"
+#include "i915_drm.h"
+
+static inline int
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
+{
+	return obj_priv->madv == I915_MADV_DONTNEED;
+}
+
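An object only satisfies this predicate if userspace has previously marked it purgeable through the I915_GEM_MADVISE ioctl. A sketch of that userspace side, assuming the libdrm/uapi structures of this era (an open DRM fd and a valid GEM handle; error handling omitted):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

/* Mark a GEM buffer as discardable, so the eviction code may treat it as
 * "clean" even when dirty. */
static int mark_purgeable(int fd, unsigned int handle)
{
	struct drm_i915_gem_madvise madv = {
		.handle = handle,
		.madv = I915_MADV_DONTNEED,	/* contents may be discarded */
	};

	if (ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv))
		return -1;
	return madv.retained;	/* 0 if the pages were already dropped */
}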
+static int
+i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
+				      unsigned alignment, int *found)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	struct drm_gem_object *best = NULL;
+	struct drm_gem_object *first = NULL;
+
+	/* Try to find the smallest clean object */
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		struct drm_gem_object *obj = &obj_priv->base;
+		if (obj->size >= min_size) {
+			if ((!obj_priv->dirty ||
+			     i915_gem_object_is_purgeable(obj_priv)) &&
+			    (!best || obj->size < best->size)) {
+				best = obj;
+				if (best->size == min_size)
+					break;
+			}
+			if (!first)
+				first = obj;
+		}
+	}
+
+	obj = best ? best : first;
+
+	if (!obj) {
+		*found = 0;
+		return 0;
+	}
+
+	*found = 1;
+
+#if WATCH_LRU
+	DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+	obj_priv = to_intel_bo(obj);
+	BUG_ON(obj_priv->pin_count != 0);
+	BUG_ON(obj_priv->active);
+
+	/* Wait on the rendering and unbind the buffer. */
+	return i915_gem_object_unbind(obj);
+}
+
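The scan implements a simple selection policy: prefer the smallest clean-or-purgeable object that satisfies min_size (stopping early on an exact fit), and fall back to the first sufficiently large object even if it is dirty. A self-contained model of that policy (hypothetical types, not driver code):

#include <stdio.h>
#include <stddef.h>

struct buf { size_t size; int dirty; int purgeable; };

/* Smallest clean-or-purgeable buffer >= min_size wins; otherwise fall back
 * to the first buffer that is big enough; NULL means the caller escalates. */
static struct buf *pick_victim(struct buf *bufs, int n, size_t min_size)
{
	struct buf *best = NULL, *first = NULL;

	for (int i = 0; i < n; i++) {
		struct buf *b = &bufs[i];
		if (b->size < min_size)
			continue;
		if ((!b->dirty || b->purgeable) &&
		    (!best || b->size < best->size)) {
			best = b;
			if (best->size == min_size)
				break;		/* exact fit, stop early */
		}
		if (!first)
			first = b;
	}
	return best ? best : first;
}

int main(void)
{
	struct buf bufs[] = {
		{ 8192, 1, 0 },		/* dirty: fallback candidate only */
		{ 4096, 0, 0 },		/* clean exact fit: wins          */
	};
	struct buf *victim = pick_victim(bufs, 2, 4096);
	printf("victim size = %zu\n", victim ? victim->size : 0);
	return 0;
}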
+static void
+i915_gem_flush_ring(struct drm_device *dev,
+	       uint32_t invalidate_domains,
+	       uint32_t flush_domains,
+	       struct intel_ring_buffer *ring)
+{
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		drm_agp_chipset_flush(dev);
+	ring->flush(dev, ring,
+			invalidate_domains,
+			flush_domains);
+}
+
+int
+i915_gem_evict_something(struct drm_device *dev,
+			 int min_size, unsigned alignment)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret, found;
+
+	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
+	for (;;) {
+		i915_gem_retire_requests(dev);
+
+		/* If there's an inactive buffer available now, grab it
+		 * and be done.
+		 */
+		ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
+							    alignment,
+							    &found);
+		if (found)
+			return ret;
+
+		/* If we didn't get anything, but the ring is still processing
+		 * things, wait for the next to finish and hopefully leave us
+		 * a buffer to evict.
+		 */
+		if (!list_empty(&render_ring->request_list)) {
+			struct drm_i915_gem_request *request;
+
+			request = list_first_entry(&render_ring->request_list,
+						   struct drm_i915_gem_request,
+						   list);
+
+			ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
+			if (ret)
+				return ret;
+
+			continue;
+		}
+
+		if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
+			struct drm_i915_gem_request *request;
+
+			request = list_first_entry(&bsd_ring->request_list,
+						   struct drm_i915_gem_request,
+						   list);
+
+			ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
+			if (ret)
+				return ret;
+
+			continue;
+		}
+
+		/* If we didn't have anything on the request list but there
+		 * are buffers awaiting a flush, emit one and try again.
+		 * When we wait on it, those buffers waiting for that flush
+		 * will get moved to inactive.
+		 */
+		if (!list_empty(&dev_priv->mm.flushing_list)) {
+			struct drm_gem_object *obj = NULL;
+			struct drm_i915_gem_object *obj_priv;
+
+			/* Find an object that we can immediately reuse */
+			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+				obj = &obj_priv->base;
+				if (obj->size >= min_size)
+					break;
+
+				obj = NULL;
+			}
+
+			if (obj != NULL) {
+				uint32_t seqno;
+
+				i915_gem_flush_ring(dev,
+					       obj->write_domain,
+					       obj->write_domain,
+					       obj_priv->ring);
+				seqno = i915_add_request(dev, NULL,
+						obj->write_domain,
+						obj_priv->ring);
+				if (seqno == 0)
+					return -ENOMEM;
+				continue;
+			}
+		}
+
+		/* If we didn't do any of the above, there's no single buffer
+		 * large enough to swap out for the new one, so just evict
+		 * everything and start again. (This should be rare.)
+		 */
+		if (!list_empty(&dev_priv->mm.inactive_list))
+			return i915_gem_evict_inactive(dev);
+		else
+			return i915_gem_evict_everything(dev);
+	}
+}
+
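Taken together, each pass of the loop escalates through progressively more expensive ways of producing an idle buffer: scan the inactive list, wait for the oldest request on either ring, flush a dirty buffer so it will eventually retire onto the inactive list, and as a last resort evict the inactive list or everything. A toy model of that escalation order (stub stages with fake state, illustrative only; the real flush produces a request that stage 2 then waits on):

#include <stdio.h>

/* Toy state: one request in flight, one dirty buffer, nothing idle yet. */
static int pending_requests = 1;
static int dirty_buffers = 1;
static int idle_buffers;

static int scan_inactive(void)		/* stage 1: find an idle victim */
{
	if (!idle_buffers)
		return 0;
	idle_buffers--;
	return 1;
}

static int wait_for_request(void)	/* stage 2: let in-flight work retire */
{
	if (!pending_requests)
		return 0;
	pending_requests--;
	return 1;
}

static int flush_dirty(void)		/* stage 3: flush so dirty can retire */
{
	if (!dirty_buffers)
		return 0;
	dirty_buffers--;
	idle_buffers++;		/* simplification: lands it on inactive */
	return 1;
}

/* Model of the escalation loop in i915_gem_evict_something() above. */
static int evict_something(void)
{
	for (;;) {
		if (scan_inactive())
			return 0;	/* cheapest path: reuse idle buffer */
		if (wait_for_request())
			continue;	/* something retired; rescan        */
		if (flush_dirty())
			continue;	/* flush emitted; rescan            */
		return -1;		/* stage 4: evict everything        */
	}
}

int main(void)
{
	printf("evict_something() = %d\n", evict_something());
	return 0;
}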
+int
+i915_gem_evict_everything(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	bool lists_empty;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+		       list_empty(&dev_priv->mm.flushing_list) &&
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev)
+			|| list_empty(&dev_priv->bsd_ring.active_list)));
+	spin_unlock(&dev_priv->mm.active_list_lock);
+
+	if (lists_empty)
+		return -ENOSPC;
+
+	/* Flush everything (on to the inactive lists) and evict */
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		return ret;
+
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+	ret = i915_gem_evict_inactive(dev);
+	if (ret)
+		return ret;
+
+	spin_lock(&dev_priv->mm.active_list_lock);
+	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
+		       list_empty(&dev_priv->mm.flushing_list) &&
+		       list_empty(&dev_priv->render_ring.active_list) &&
+		       (!HAS_BSD(dev)
+			|| list_empty(&dev_priv->bsd_ring.active_list)));
+	spin_unlock(&dev_priv->mm.active_list_lock);
+	BUG_ON(!lists_empty);
+
+	return 0;
+}
+
+/** Unbinds all inactive objects. */
+int
+i915_gem_evict_inactive(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	while (!list_empty(&dev_priv->mm.inactive_list)) {
+		struct drm_gem_object *obj;
+		int ret;
+
+		obj = &list_first_entry(&dev_priv->mm.inactive_list,
+					struct drm_i915_gem_object,
+					list)->base;
+
+		ret = i915_gem_object_unbind(obj);
+		if (ret != 0) {
+			DRM_ERROR("Error unbinding object: %d\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
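The drain loop always unbinds the head of mm.inactive_list and relies on i915_gem_object_unbind() unlinking the object as a side effect, which is what lets the while condition make progress. The &list_first_entry(...)->base expression recovers the embedded base object from the containing private struct via container_of(). A self-contained illustration of that idiom (simplified macros; a hypothetical layout mirroring the driver's):

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

struct drm_gem_object      { size_t size; };
struct drm_i915_gem_object {			/* mirrors the driver layout */
	struct drm_gem_object base;		/* embedded base object      */
	struct list_head list;			/* link on mm.inactive_list  */
};

int main(void)
{
	struct drm_i915_gem_object obj = { .base = { .size = 4096 } };
	struct list_head inactive = { &obj.list, &obj.list };
	obj.list.next = obj.list.prev = &inactive;

	/* Same expression as the drain loop above: private struct -> base. */
	struct drm_gem_object *base =
		&list_first_entry(&inactive, struct drm_i915_gem_object, list)->base;
	printf("size = %zu\n", base->size);
	return 0;
}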