author     Chris Wilson <chris@chris-wilson.co.uk>    2010-08-07 06:01:24 -0400
committer  Eric Anholt <eric@anholt.net>              2010-08-09 14:24:32 -0400
commit     cd377ea93f34cbd6ec49c868b66a5a7ab184775c (patch)
tree       2a9a09f5b41ca61150a924925269141b75112244
parent     b47eb4a2b302f33adaed2a27d2b3bfc74fe35ac5 (diff)
drm/i915: Implement fair lru eviction across both rings. (v2)
Based in a large part upon Daniel Vetter's implementation and adapted
for handling multiple rings in a single pass.

This should lead to better gtt usage and fixes the page-fault-of-doom
triggered. The fairness is provided by scanning through the GTT space
amalgamating space in rendering order. As soon as we have a contiguous
space in the GTT large enough for the new object (and its alignment),
evict any object which lies within that space. This should keep more
objects resident in the GTT.

Doing throughput testing on a PineView machine with cairo-perf-trace
indicates that there is very little difference with the new LRU scan,
perhaps a small improvement... Except oddly for the poppler trace.

Reference:

  Bug 15911 - Intermittent X crash (freeze)
  https://bugzilla.kernel.org/show_bug.cgi?id=15911

  Bug 20152 - cannot view JPG in firefox when running UXA
  https://bugs.freedesktop.org/show_bug.cgi?id=20152

  Bug 24369 - Hang when scrolling firefox page with window in front
  https://bugs.freedesktop.org/show_bug.cgi?id=24369

  Bug 28478 - Intermittent graphics lockups due to overflow/loop
  https://bugs.freedesktop.org/show_bug.cgi?id=28478

v2: Attempt to clarify the logic and order of eviction through the use
of comments and macros.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Daniel Vetter <daniel@ffwll.ch>
Signed-off-by: Eric Anholt <eric@anholt.net>
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h        |   2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c  | 277
2 files changed, 146 insertions(+), 133 deletions(-)
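The eviction strategy the commit message describes (mark objects free in LRU order, and as soon as the marked space amalgamates into a contiguous hole large enough for the new object, evict only what lies within that hole) can be modelled outside the driver. The following stand-alone C program is a minimal sketch of that idea under simplifying assumptions: a toy slot-based GTT and a single LRU array stand in for the driver's inactive/active/flushing lists, and the names (struct obj, find_hole, evict_something) are illustrative, not the driver's drm_mm API.

/* Toy model of fair LRU eviction: not the i915/drm_mm implementation. */
#include <stdbool.h>
#include <stdio.h>

#define GTT_SLOTS 16

struct obj {
	const char *name;
	int offset;		/* first slot occupied in the toy GTT */
	int size;		/* number of slots occupied */
	bool marked;		/* speculatively marked free during the scan */
};

/* Objects in LRU order: index 0 is the oldest, i.e. first eviction candidate. */
static struct obj lru[] = {
	{ "A",  0, 4, false },
	{ "B", 10, 2, false },
	{ "C",  4, 3, false },
	{ "D", 12, 4, false },
	{ "E",  7, 3, false },
};
#define NOBJ (sizeof(lru) / sizeof(lru[0]))

/* Return the start of a free-or-marked run of at least min_size slots, or -1. */
static int find_hole(int min_size)
{
	bool busy[GTT_SLOTS] = { false };
	int run = 0;

	for (unsigned int i = 0; i < NOBJ; i++)
		if (!lru[i].marked)
			for (int s = lru[i].offset; s < lru[i].offset + lru[i].size; s++)
				busy[s] = true;

	for (int s = 0; s < GTT_SLOTS; s++) {
		run = busy[s] ? 0 : run + 1;
		if (run >= min_size)
			return s - min_size + 1;
	}
	return -1;
}

/* Mark objects free in LRU order until a large enough hole amalgamates,
 * then evict only the marked objects that lie within that hole. */
static int evict_something(int min_size)
{
	int hole = find_hole(min_size);

	for (unsigned int i = 0; i < NOBJ && hole < 0; i++) {
		lru[i].marked = true;
		hole = find_hole(min_size);
	}
	if (hole < 0)
		return -1;	/* nothing evictable: caller must back off */

	for (unsigned int i = 0; i < NOBJ; i++) {
		if (!lru[i].marked)
			continue;
		bool inside = lru[i].offset < hole + min_size &&
			      lru[i].offset + lru[i].size > hole;
		printf("%s %s\n", inside ? "evict" : "keep ", lru[i].name);
		lru[i].marked = false;	/* unwind the speculative marking */
	}
	return hole;
}

int main(void)
{
	/* A..E fill the GTT; asking for 6 slots evicts A and C but keeps B,
	 * even though B is older than C, because B lies outside the hole. */
	printf("hole starts at slot %d\n", evict_something(6));
	return 0;
}

In the patch below this bookkeeping is delegated to the drm_mm scan interface: drm_mm_init_scan() starts the scan, drm_mm_scan_add_block() marks each candidate and reports when a suitable hole has formed, and drm_mm_scan_remove_block() unwinds the speculative marking for objects that are kept.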
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 12c8f47f984b..6221f239fa5e 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -673,6 +673,8 @@ struct drm_i915_gem_object {
 	struct list_head list;
 	/** This object's place on GPU write list */
 	struct list_head gpu_write_list;
+	/** This object's place on eviction list */
+	struct list_head evict_list;
 
 	/**
 	 * This is set if the object is on the active or flushing lists
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 479e450f931b..72cae3cccad8 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -31,167 +31,178 @@
 #include "i915_drv.h"
 #include "i915_drm.h"
 
-static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
-{
-	return obj_priv->madv == I915_MADV_DONTNEED;
-}
-
-static int
-i915_gem_scan_inactive_list_and_evict(struct drm_device *dev, int min_size,
-				      unsigned alignment, int *found)
+static struct drm_i915_gem_object *
+i915_gem_next_active_object(struct drm_device *dev,
+			    struct list_head **render_iter,
+			    struct list_head **bsd_iter)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-	struct drm_gem_object *best = NULL;
-	struct drm_gem_object *first = NULL;
-
-	/* Try to find the smallest clean object */
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		struct drm_gem_object *obj = &obj_priv->base;
-		if (obj->size >= min_size) {
-			if ((!obj_priv->dirty ||
-			     i915_gem_object_is_purgeable(obj_priv)) &&
-			    (!best || obj->size < best->size)) {
-				best = obj;
-				if (best->size == min_size)
-					break;
-			}
-			if (!first)
-			    first = obj;
-		}
-	}
-
-	obj = best ? best : first;
-
-	if (!obj) {
-		*found = 0;
-		return 0;
-	}
-
-	*found = 1;
-
-#if WATCH_LRU
-	DRM_INFO("%s: evicting %p\n", __func__, obj);
-#endif
-	obj_priv = to_intel_bo(obj);
-	BUG_ON(obj_priv->pin_count != 0);
-	BUG_ON(obj_priv->active);
-
-	/* Wait on the rendering and unbind the buffer. */
-	return i915_gem_object_unbind(obj);
+	struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
+
+	if (*render_iter != &dev_priv->render_ring.active_list)
+		render_obj = list_entry(*render_iter,
+					struct drm_i915_gem_object,
+					list);
+
+	if (HAS_BSD(dev)) {
+		if (*bsd_iter != &dev_priv->bsd_ring.active_list)
+			bsd_obj = list_entry(*bsd_iter,
+					     struct drm_i915_gem_object,
+					     list);
+
+		if (render_obj == NULL) {
+			*bsd_iter = (*bsd_iter)->next;
+			return bsd_obj;
+		}
+
+		if (bsd_obj == NULL) {
+			*render_iter = (*render_iter)->next;
+			return render_obj;
+		}
+
+		/* XXX can we handle seqno wrapping? */
+		if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
+			*render_iter = (*render_iter)->next;
+			return render_obj;
+		} else {
+			*bsd_iter = (*bsd_iter)->next;
+			return bsd_obj;
+		}
+	} else {
+		*render_iter = (*render_iter)->next;
+		return render_obj;
+	}
 }
 
-static void
-i915_gem_flush_ring(struct drm_device *dev,
-	       uint32_t invalidate_domains,
-	       uint32_t flush_domains,
-	       struct intel_ring_buffer *ring)
+static bool
+mark_free(struct drm_i915_gem_object *obj_priv,
+	  struct list_head *unwind)
 {
-	if (flush_domains & I915_GEM_DOMAIN_CPU)
-		drm_agp_chipset_flush(dev);
-	ring->flush(dev, ring,
-			invalidate_domains,
-			flush_domains);
+	list_add(&obj_priv->evict_list, unwind);
+	return drm_mm_scan_add_block(obj_priv->gtt_space);
 }
 
+#define i915_for_each_active_object(OBJ, R, B) \
+	*(R) = dev_priv->render_ring.active_list.next; \
+	*(B) = dev_priv->bsd_ring.active_list.next; \
+	while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
+
 int
-i915_gem_evict_something(struct drm_device *dev,
-			 int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int ret, found;
-
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
-	struct intel_ring_buffer *bsd_ring = &dev_priv->bsd_ring;
-	for (;;) {
-		i915_gem_retire_requests(dev);
-
-		/* If there's an inactive buffer available now, grab it
-		 * and be done.
-		 */
-		ret = i915_gem_scan_inactive_list_and_evict(dev, min_size,
-							    alignment,
-							    &found);
-		if (found)
-			return ret;
-
-		/* If we didn't get anything, but the ring is still processing
-		 * things, wait for the next to finish and hopefully leave us
-		 * a buffer to evict.
-		 */
-		if (!list_empty(&render_ring->request_list)) {
-			struct drm_i915_gem_request *request;
-
-			request = list_first_entry(&render_ring->request_list,
-						   struct drm_i915_gem_request,
-						   list);
-
-			ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
-			if (ret)
-				return ret;
-
-			continue;
-		}
-
-		if (HAS_BSD(dev) && !list_empty(&bsd_ring->request_list)) {
-			struct drm_i915_gem_request *request;
-
-			request = list_first_entry(&bsd_ring->request_list,
-						   struct drm_i915_gem_request,
-						   list);
-
-			ret = i915_do_wait_request(dev, request->seqno, true, request->ring);
-			if (ret)
-				return ret;
-
-			continue;
-		}
-
-		/* If we didn't have anything on the request list but there
-		 * are buffers awaiting a flush, emit one and try again.
-		 * When we wait on it, those buffers waiting for that flush
-		 * will get moved to inactive.
-		 */
-		if (!list_empty(&dev_priv->mm.flushing_list)) {
-			struct drm_gem_object *obj = NULL;
-			struct drm_i915_gem_object *obj_priv;
-
-			/* Find an object that we can immediately reuse */
-			list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
-				obj = &obj_priv->base;
-				if (obj->size >= min_size)
-					break;
-
-				obj = NULL;
-			}
-
-			if (obj != NULL) {
-				uint32_t seqno;
-
-				i915_gem_flush_ring(dev,
-					       obj->write_domain,
-					       obj->write_domain,
-					       obj_priv->ring);
-				seqno = i915_add_request(dev, NULL,
-						obj->write_domain,
-						obj_priv->ring);
-				if (seqno == 0)
-					return -ENOMEM;
-				continue;
-			}
-		}
-
-		/* If we didn't do any of the above, there's no single buffer
-		 * large enough to swap out for the new one, so just evict
-		 * everything and start again. (This should be rare.)
-		 */
-		if (!list_empty(&dev_priv->mm.inactive_list))
-			return i915_gem_evict_inactive(dev);
-		else
-			return i915_gem_evict_everything(dev);
-	}
+	struct list_head eviction_list, unwind_list;
+	struct drm_i915_gem_object *obj_priv, *tmp_obj_priv;
+	struct list_head *render_iter, *bsd_iter;
+	int ret = 0;
+
+	i915_gem_retire_requests(dev);
+
+	/* Re-check for free space after retiring requests */
+	if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+			       min_size, alignment, 0))
+		return 0;
+
+	/*
+	 * The goal is to evict objects and amalgamate space in LRU order.
+	 * The oldest idle objects reside on the inactive list, which is in
+	 * retirement order. The next objects to retire are those on the (per
+	 * ring) active list that do not have an outstanding flush. Once the
+	 * hardware reports completion (the seqno is updated after the
+	 * batchbuffer has been finished) the clean buffer objects would
+	 * be retired to the inactive list. Any dirty objects would be added
+	 * to the tail of the flushing list. So after processing the clean
+	 * active objects we need to emit a MI_FLUSH to retire the flushing
+	 * list, hence the retirement order of the flushing list is in
+	 * advance of the dirty objects on the active lists.
+	 *
+	 * The retirement sequence is thus:
+	 *    1. Inactive objects (already retired)
+	 *    2. Clean active objects
+	 *    3. Flushing list
+	 *    4. Dirty active objects.
+	 *
+	 * On each list, the oldest objects lie at the HEAD with the freshest
+	 * object on the TAIL.
+	 */
+
+	INIT_LIST_HEAD(&unwind_list);
+	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+
+	/* First see if there is a large enough contiguous idle region... */
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		if (mark_free(obj_priv, &unwind_list))
+			goto found;
+	}
+
+	/* Now merge in the soon-to-be-expired objects... */
+	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+		/* Does the object require an outstanding flush? */
+		if (obj_priv->base.write_domain || obj_priv->pin_count)
+			continue;
+
+		if (mark_free(obj_priv, &unwind_list))
+			goto found;
+	}
+
+	/* Finally add anything with a pending flush (in order of retirement) */
+	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+		if (obj_priv->pin_count)
+			continue;
+
+		if (mark_free(obj_priv, &unwind_list))
+			goto found;
+	}
+	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+		if (! obj_priv->base.write_domain || obj_priv->pin_count)
+			continue;
+
+		if (mark_free(obj_priv, &unwind_list))
+			goto found;
+	}
+
+	/* Nothing found, clean up and bail out! */
+	list_for_each_entry(obj_priv, &unwind_list, evict_list) {
+		ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+		BUG_ON(ret);
+	}
+
+	/* We expect the caller to unpin, evict all and try again, or give up.
+	 * So calling i915_gem_evict_everything() is unnecessary.
+	 */
+	return -ENOSPC;
+
+found:
+	INIT_LIST_HEAD(&eviction_list);
+	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
+				 &unwind_list, evict_list) {
+		if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
+			/* drm_mm doesn't allow any other other operations while
+			 * scanning, therefore store to be evicted objects on a
+			 * temporary list. */
+			list_move(&obj_priv->evict_list, &eviction_list);
+		}
+	}
+
+	/* Unbinding will emit any required flushes */
+	list_for_each_entry_safe(obj_priv, tmp_obj_priv,
+				 &eviction_list, evict_list) {
+#if WATCH_LRU
+		DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+		ret = i915_gem_object_unbind(&obj_priv->base);
+		if (ret)
+			return ret;
+	}
+
+	/* The just created free hole should be on the top of the free stack
+	 * maintained by drm_mm, so this BUG_ON actually executes in O(1).
+	 * Furthermore all accessed data has just recently been used, so it
+	 * should be really fast, too. */
+	BUG_ON(!drm_mm_search_free(&dev_priv->mm.gtt_space, min_size,
+				   alignment, 0));
+
+	return 0;
 }
 
 int