about summary refs log tree commit diff stats
path: root/drivers/gpu/drm/i915
diff options
context:
space:
mode:
authorBen Widawsky <ben@bwidawsk.net>2013-07-31 20:00:11 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2013-08-08 08:04:43 -0400
commitf6cd1f15d345688cb95cc195aaf8b375f7de8cf6 (patch)
treed0a33c5577684c5a39fdf0d9821f9d9ebf38eb81 /drivers/gpu/drm/i915
parent07fe0b12800d4752d729d4122c01f41f80a5ba5a (diff)
drm/i915: Use new bind/unbind in eviction code
Eviction code, like the rest of the converted code, needs to be aware of the address space for which it is evicting (or the everything case, all addresses). With the updated bind/unbind interfaces of the last patch, we can now safely move the eviction code over.

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h4
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c53
3 files changed, 33 insertions, 26 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8205b4b4f2be..2421ad17831b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1983,7 +1983,9 @@ static inline void i915_gem_chipset_flush(struct drm_device *dev)
1983 1983
1984 1984
1985/* i915_gem_evict.c */ 1985/* i915_gem_evict.c */
1986int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size, 1986int __must_check i915_gem_evict_something(struct drm_device *dev,
1987 struct i915_address_space *vm,
1988 int min_size,
1987 unsigned alignment, 1989 unsigned alignment,
1988 unsigned cache_level, 1990 unsigned cache_level,
1989 bool mappable, 1991 bool mappable,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index db9792c47827..6c8c6b6b91ca 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3166,7 +3166,7 @@ search_free:
3166 size, alignment, 3166 size, alignment,
3167 obj->cache_level, 0, gtt_max); 3167 obj->cache_level, 0, gtt_max);
3168 if (ret) { 3168 if (ret) {
3169 ret = i915_gem_evict_something(dev, size, alignment, 3169 ret = i915_gem_evict_something(dev, vm, size, alignment,
3170 obj->cache_level, 3170 obj->cache_level,
3171 map_and_fenceable, 3171 map_and_fenceable,
3172 nonblocking); 3172 nonblocking);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 9205a4179b7e..61bf5e20e5e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,26 +32,21 @@
32#include "i915_trace.h" 32#include "i915_trace.h"
33 33
34static bool 34static bool
35mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind) 35mark_free(struct i915_vma *vma, struct list_head *unwind)
36{ 36{
37 struct drm_device *dev = obj->base.dev; 37 if (vma->obj->pin_count)
38 struct drm_i915_private *dev_priv = dev->dev_private;
39 struct i915_vma *vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base);
40
41 if (obj->pin_count)
42 return false; 38 return false;
43 39
44 list_add(&obj->exec_list, unwind); 40 list_add(&vma->obj->exec_list, unwind);
45 return drm_mm_scan_add_block(&vma->node); 41 return drm_mm_scan_add_block(&vma->node);
46} 42}
47 43
48int 44int
49i915_gem_evict_something(struct drm_device *dev, int min_size, 45i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
50 unsigned alignment, unsigned cache_level, 46 int min_size, unsigned alignment, unsigned cache_level,
51 bool mappable, bool nonblocking) 47 bool mappable, bool nonblocking)
52{ 48{
53 drm_i915_private_t *dev_priv = dev->dev_private; 49 drm_i915_private_t *dev_priv = dev->dev_private;
54 struct i915_address_space *vm = &dev_priv->gtt.base;
55 struct list_head eviction_list, unwind_list; 50 struct list_head eviction_list, unwind_list;
56 struct i915_vma *vma; 51 struct i915_vma *vma;
57 struct drm_i915_gem_object *obj; 52 struct drm_i915_gem_object *obj;
@@ -83,16 +78,18 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
83 */ 78 */
84 79
85 INIT_LIST_HEAD(&unwind_list); 80 INIT_LIST_HEAD(&unwind_list);
86 if (mappable) 81 if (mappable) {
82 BUG_ON(!i915_is_ggtt(vm));
87 drm_mm_init_scan_with_range(&vm->mm, min_size, 83 drm_mm_init_scan_with_range(&vm->mm, min_size,
88 alignment, cache_level, 0, 84 alignment, cache_level, 0,
89 dev_priv->gtt.mappable_end); 85 dev_priv->gtt.mappable_end);
90 else 86 } else
91 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); 87 drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
92 88
93 /* First see if there is a large enough contiguous idle region... */ 89 /* First see if there is a large enough contiguous idle region... */
94 list_for_each_entry(obj, &vm->inactive_list, mm_list) { 90 list_for_each_entry(obj, &vm->inactive_list, mm_list) {
95 if (mark_free(obj, &unwind_list)) 91 struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
92 if (mark_free(vma, &unwind_list))
96 goto found; 93 goto found;
97 } 94 }
98 95
@@ -101,7 +98,8 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
101 98
102 /* Now merge in the soon-to-be-expired objects... */ 99 /* Now merge in the soon-to-be-expired objects... */
103 list_for_each_entry(obj, &vm->active_list, mm_list) { 100 list_for_each_entry(obj, &vm->active_list, mm_list) {
104 if (mark_free(obj, &unwind_list)) 101 struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm);
102 if (mark_free(vma, &unwind_list))
105 goto found; 103 goto found;
106 } 104 }
107 105
@@ -111,7 +109,7 @@ none:
111 obj = list_first_entry(&unwind_list, 109 obj = list_first_entry(&unwind_list,
112 struct drm_i915_gem_object, 110 struct drm_i915_gem_object,
113 exec_list); 111 exec_list);
114 vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); 112 vma = i915_gem_obj_to_vma(obj, vm);
115 ret = drm_mm_scan_remove_block(&vma->node); 113 ret = drm_mm_scan_remove_block(&vma->node);
116 BUG_ON(ret); 114 BUG_ON(ret);
117 115
@@ -132,7 +130,7 @@ found:
132 obj = list_first_entry(&unwind_list, 130 obj = list_first_entry(&unwind_list,
133 struct drm_i915_gem_object, 131 struct drm_i915_gem_object,
134 exec_list); 132 exec_list);
135 vma = i915_gem_obj_to_vma(obj, &dev_priv->gtt.base); 133 vma = i915_gem_obj_to_vma(obj, vm);
136 if (drm_mm_scan_remove_block(&vma->node)) { 134 if (drm_mm_scan_remove_block(&vma->node)) {
137 list_move(&obj->exec_list, &eviction_list); 135 list_move(&obj->exec_list, &eviction_list);
138 drm_gem_object_reference(&obj->base); 136 drm_gem_object_reference(&obj->base);
@@ -147,7 +145,7 @@ found:
147 struct drm_i915_gem_object, 145 struct drm_i915_gem_object,
148 exec_list); 146 exec_list);
149 if (ret == 0) 147 if (ret == 0)
150 ret = i915_gem_object_ggtt_unbind(obj); 148 ret = i915_vma_unbind(i915_gem_obj_to_vma(obj, vm));
151 149
152 list_del_init(&obj->exec_list); 150 list_del_init(&obj->exec_list);
153 drm_gem_object_unreference(&obj->base); 151 drm_gem_object_unreference(&obj->base);
@@ -160,13 +158,18 @@ int
160i915_gem_evict_everything(struct drm_device *dev) 158i915_gem_evict_everything(struct drm_device *dev)
161{ 159{
162 drm_i915_private_t *dev_priv = dev->dev_private; 160 drm_i915_private_t *dev_priv = dev->dev_private;
163 struct i915_address_space *vm = &dev_priv->gtt.base; 161 struct i915_address_space *vm;
164 struct drm_i915_gem_object *obj, *next; 162 struct drm_i915_gem_object *obj, *next;
165 bool lists_empty; 163 bool lists_empty = true;
166 int ret; 164 int ret;
167 165
168 lists_empty = (list_empty(&vm->inactive_list) && 166 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
169 list_empty(&vm->active_list)); 167 lists_empty = (list_empty(&vm->inactive_list) &&
168 list_empty(&vm->active_list));
169 if (!lists_empty)
170 lists_empty = false;
171 }
172
170 if (lists_empty) 173 if (lists_empty)
171 return -ENOSPC; 174 return -ENOSPC;
172 175
@@ -183,9 +186,11 @@ i915_gem_evict_everything(struct drm_device *dev)
183 i915_gem_retire_requests(dev); 186 i915_gem_retire_requests(dev);
184 187
185 /* Having flushed everything, unbind() should never raise an error */ 188 /* Having flushed everything, unbind() should never raise an error */
186 list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) 189 list_for_each_entry(vm, &dev_priv->vm_list, global_link) {
187 if (obj->pin_count == 0) 190 list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
188 WARN_ON(i915_gem_object_ggtt_unbind(obj)); 191 if (obj->pin_count == 0)
192 WARN_ON(i915_vma_unbind(i915_gem_obj_to_vma(obj, vm)));
193 }
189 194
190 return 0; 195 return 0;
191} 196}