author	Ben Widawsky <ben@bwidawsk.net>	2013-07-16 19:50:08 -0400
committer	Daniel Vetter <daniel.vetter@ffwll.ch>	2013-07-17 16:24:32 -0400
commit	5cef07e1628300aeda9ac9dae95a2b406175b3ff (patch)
tree	ddc33253b05f93a526d5f9cf7b9d128abfe701a4
parent	a7bbbd63e79a89b3e7b77eb734f2773ad69a2a43 (diff)
drm/i915: Move active/inactive lists to new mm
Shamelessly manipulated out of Daniel :-) "When moving the lists around
explain that the active/inactive stuff is used by eviction when we run
out of address space, so needs to be per-vma and per-address space.
Bound/unbound otoh is used by the shrinker, which only cares about the
amount of memory used and not one bit about which address space this
memory is used in. Of course to actually kick out an object we need to
unbind it from every address space, but for that we have the per-object
list of vmas."

v2: Leave the bound list as a global one. (Chris, indirectly)

v3: Rebased with no i915_gtt_vm. In most places I added a new *vm local,
since it will eventually be replaced by a vm argument.
Put comment back inline, since it no longer makes sense to do otherwise.

v4: Rebased on hangcheck/error state movement

Signed-off-by: Ben Widawsky <ben@bwidawsk.net>
Reviewed-by: Imre Deak <imre.deak@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
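In short, the data-structure split after this patch looks roughly like the following (an illustrative sketch only, not the actual header; the field names are the ones visible in the i915_drv.h hunk and the INIT_LIST_HEAD calls below):

struct i915_address_space {
	...
	/* Per-address-space: eviction walks these when a given
	 * address space runs out of room. */
	struct list_head active_list;
	struct list_head inactive_list;
	...
};

struct i915_gem_mm {
	...
	/* Global: the shrinker only cares how much memory is bound or
	 * unbound, not which address space it lives in. */
	struct list_head bound_list;
	struct list_head unbound_list;
	...
};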
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c	16
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	46
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	33
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_debug.c	2
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_evict.c	18
-rw-r--r--	drivers/gpu/drm/i915/i915_gem_stolen.c	3
-rw-r--r--	drivers/gpu/drm/i915/i915_gpu_error.c	8
7 files changed, 67 insertions(+), 59 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1c697c0ab7e5..a9246e9c5f9d 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -135,7 +135,8 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	uintptr_t list = (uintptr_t) node->info_ent->data;
 	struct list_head *head;
 	struct drm_device *dev = node->minor->dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj;
 	size_t total_obj_size, total_gtt_size;
 	int count, ret;
@@ -147,11 +148,11 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	switch (list) {
 	case ACTIVE_LIST:
 		seq_puts(m, "Active:\n");
-		head = &dev_priv->mm.active_list;
+		head = &vm->active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_puts(m, "Inactive:\n");
-		head = &dev_priv->mm.inactive_list;
+		head = &vm->inactive_list;
 		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
@@ -219,6 +220,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 	u32 count, mappable_count, purgeable_count;
 	size_t size, mappable_size, purgeable_size;
 	struct drm_i915_gem_object *obj;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_file *file;
 	int ret;
 
@@ -236,12 +238,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.active_list, mm_list);
+	count_objects(&vm->active_list, mm_list);
 	seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
 	size = count = mappable_size = mappable_count = 0;
-	count_objects(&dev_priv->mm.inactive_list, mm_list);
+	count_objects(&vm->inactive_list, mm_list);
 	seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
@@ -1625,6 +1627,7 @@ i915_drop_caches_set(void *data, u64 val)
 	struct drm_device *dev = data;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj, *next;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	int ret;
 
 	DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val);
@@ -1645,7 +1648,8 @@ i915_drop_caches_set(void *data, u64 val)
 	i915_gem_retire_requests(dev);
 
 	if (val & DROP_BOUND) {
-		list_for_each_entry_safe(obj, next, &dev_priv->mm.inactive_list, mm_list)
+		list_for_each_entry_safe(obj, next, &vm->inactive_list,
+					 mm_list)
 			if (obj->pin_count == 0) {
 				ret = i915_gem_object_unbind(obj);
 				if (ret)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 1e1664e8a599..ee21af3a17ac 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -458,6 +458,29 @@ struct i915_address_space {
 		struct page *page;
 	} scratch;
 
+	/**
+	 * List of objects currently involved in rendering.
+	 *
+	 * Includes buffers having the contents of their GPU caches
+	 * flushed, not necessarily primitives. last_rendering_seqno
+	 * represents when the rendering involved will be completed.
+	 *
+	 * A reference is held on the buffer while on this list.
+	 */
+	struct list_head active_list;
+
+	/**
+	 * LRU list of objects which are not in the ringbuffer and
+	 * are ready to unbind, but are still in the GTT.
+	 *
+	 * last_rendering_seqno is 0 while an object is in this list.
+	 *
+	 * A reference is not held on the buffer while on this list,
+	 * as merely being GTT-bound shouldn't prevent its being
+	 * freed, and we'll pull it off the list in the free path.
+	 */
+	struct list_head inactive_list;
+
 	/* FIXME: Need a more generic return type */
 	gen6_gtt_pte_t (*pte_encode)(dma_addr_t addr,
 				     enum i915_cache_level level);
@@ -852,29 +875,6 @@ struct i915_gem_mm {
 	struct shrinker inactive_shrinker;
 	bool shrinker_no_lock_stealing;
 
-	/**
-	 * List of objects currently involved in rendering.
-	 *
-	 * Includes buffers having the contents of their GPU caches
-	 * flushed, not necessarily primitives. last_rendering_seqno
-	 * represents when the rendering involved will be completed.
-	 *
-	 * A reference is held on the buffer while on this list.
-	 */
-	struct list_head active_list;
-
-	/**
-	 * LRU list of objects which are not in the ringbuffer and
-	 * are ready to unbind, but are still in the GTT.
-	 *
-	 * last_rendering_seqno is 0 while an object is in this list.
-	 *
-	 * A reference is not held on the buffer while on this list,
-	 * as merely being GTT-bound shouldn't prevent its being
-	 * freed, and we'll pull it off the list in the free path.
-	 */
-	struct list_head inactive_list;
-
 	/** LRU list of objects with fence regs on them. */
 	struct list_head fence_list;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8f37229a3b52..8830856bf3f9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1692,6 +1692,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		  bool purgeable_only)
 {
 	struct drm_i915_gem_object *obj, *next;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	long count = 0;
 
 	list_for_each_entry_safe(obj, next,
@@ -1705,9 +1706,7 @@ __i915_gem_shrink(struct drm_i915_private *dev_priv, long target,
 		}
 	}
 
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list,
-				 mm_list) {
+	list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) {
 		if ((i915_gem_object_is_purgeable(obj) || !purgeable_only) &&
 		    i915_gem_object_unbind(obj) == 0 &&
 		    i915_gem_object_put_pages(obj) == 0) {
@@ -1878,6 +1877,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	u32 seqno = intel_ring_get_seqno(ring);
 
 	BUG_ON(ring == NULL);
@@ -1890,7 +1890,7 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 	}
 
 	/* Move from whatever list we were on to the tail of execution. */
-	list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+	list_move_tail(&obj->mm_list, &vm->active_list);
 	list_move_tail(&obj->ring_list, &ring->active_list);
 
 	obj->last_read_seqno = seqno;
@@ -1914,11 +1914,12 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 
 	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
 	BUG_ON(!obj->active);
 
-	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+	list_move_tail(&obj->mm_list, &vm->inactive_list);
 
 	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
@@ -2270,6 +2271,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
 void i915_gem_reset(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj;
 	struct intel_ring_buffer *ring;
 	int i;
@@ -2280,12 +2282,8 @@ void i915_gem_reset(struct drm_device *dev)
 	/* Move everything out of the GPU domains to ensure we do any
 	 * necessary invalidation upon reuse.
 	 */
-	list_for_each_entry(obj,
-			    &dev_priv->mm.inactive_list,
-			    mm_list)
-	{
+	list_for_each_entry(obj, &vm->inactive_list, mm_list)
 		obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
-	}
 
 	/* The fence registers are invalidated so clear them out */
 	i915_gem_reset_fences(dev);
@@ -3076,6 +3074,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 {
 	struct drm_device *dev = obj->base.dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	u32 size, fence_size, fence_alignment, unfenced_alignment;
 	bool mappable, fenceable;
 	size_t gtt_max = map_and_fenceable ?
@@ -3151,7 +3150,7 @@ search_free:
 	}
 
 	list_move_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+	list_add_tail(&obj->mm_list, &vm->inactive_list);
 
 	fenceable =
 		i915_gem_obj_ggtt_size(obj) == fence_size &&
@@ -3299,7 +3298,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
 	/* And bump the LRU for this access */
 	if (i915_gem_object_is_inactive(obj))
-		list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+		list_move_tail(&obj->mm_list,
+			       &dev_priv->gtt.base.inactive_list);
 
 	return 0;
 }
@@ -4242,7 +4242,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		return ret;
 	}
 
-	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->gtt.base.active_list));
 	mutex_unlock(&dev->struct_mutex);
 
 	ret = drm_irq_install(dev);
@@ -4320,8 +4320,8 @@ i915_gem_load(struct drm_device *dev)
 				  SLAB_HWCACHE_ALIGN,
 				  NULL);
 
-	INIT_LIST_HEAD(&dev_priv->mm.active_list);
-	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->gtt.base.active_list);
+	INIT_LIST_HEAD(&dev_priv->gtt.base.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.bound_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
@@ -4591,6 +4591,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 			     struct drm_i915_private,
 			     mm.inactive_shrinker);
 	struct drm_device *dev = dev_priv->dev;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj;
 	int nr_to_scan = sc->nr_to_scan;
 	bool unlock = true;
@@ -4619,7 +4620,7 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
 	list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
 		if (obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, global_list)
+	list_for_each_entry(obj, &vm->inactive_list, global_list)
 		if (obj->pin_count == 0 && obj->pages_pin_count == 0)
 			cnt += obj->base.size >> PAGE_SHIFT;
 
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 582e6a5f3dac..bf945a39fbb1 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -97,7 +97,7 @@ i915_verify_lists(struct drm_device *dev)
 		}
 	}
 
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+	list_for_each_entry(obj, &i915_gtt_vm->inactive_list, list) {
 		if (obj->base.dev != dev ||
 		    !atomic_read(&obj->base.refcount.refcount)) {
 			DRM_ERROR("freed inactive %p\n", obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index f1c9ab096b00..43b82350d8dc 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -47,6 +47,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 			 bool mappable, bool nonblocking)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct list_head eviction_list, unwind_list;
 	struct drm_i915_gem_object *obj;
 	int ret = 0;
@@ -78,15 +79,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 
 	INIT_LIST_HEAD(&unwind_list);
 	if (mappable)
-		drm_mm_init_scan_with_range(&dev_priv->gtt.base.mm, min_size,
+		drm_mm_init_scan_with_range(&vm->mm, min_size,
 					    alignment, cache_level, 0,
 					    dev_priv->gtt.mappable_end);
 	else
-		drm_mm_init_scan(&dev_priv->gtt.base.mm, min_size, alignment,
-				 cache_level);
+		drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
 
 	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+	list_for_each_entry(obj, &vm->inactive_list, mm_list) {
 		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
@@ -95,7 +95,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 		goto none;
 
 	/* Now merge in the soon-to-be-expired objects... */
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+	list_for_each_entry(obj, &vm->active_list, mm_list) {
 		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
@@ -154,12 +154,13 @@ int
 i915_gem_evict_everything(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj, *next;
 	bool lists_empty;
 	int ret;
 
-	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.active_list));
+	lists_empty = (list_empty(&vm->inactive_list) &&
+		       list_empty(&vm->active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
@@ -176,8 +177,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 	i915_gem_retire_requests(dev);
 
 	/* Having flushed everything, unbind() should never raise an error */
-	list_for_each_entry_safe(obj, next,
-				 &dev_priv->mm.inactive_list, mm_list)
+	list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list)
 		if (obj->pin_count == 0)
 			WARN_ON(i915_gem_object_unbind(obj));
 
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 5d38cb0cd1ce..90a618335db9 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -348,6 +348,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 					       u32 size)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj;
 	struct drm_mm_node *stolen;
 	int ret;
@@ -408,7 +409,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
 	obj->has_global_gtt_mapping = 1;
 
 	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
-	list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+	list_add_tail(&obj->mm_list, &vm->inactive_list);
 
 	return obj;
 
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 58386cebb865..d970d84da65f 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -622,6 +622,7 @@ static struct drm_i915_error_object *
 i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 			     struct intel_ring_buffer *ring)
 {
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj;
 	u32 seqno;
 
@@ -641,7 +642,7 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv,
 	}
 
 	seqno = ring->get_seqno(ring, false);
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+	list_for_each_entry(obj, &vm->active_list, mm_list) {
 		if (obj->ring != ring)
 			continue;
 
@@ -773,11 +774,12 @@ static void i915_gem_record_rings(struct drm_device *dev,
 static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
 				     struct drm_i915_error_state *error)
 {
+	struct i915_address_space *vm = &dev_priv->gtt.base;
 	struct drm_i915_gem_object *obj;
 	int i;
 
 	i = 0;
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list)
+	list_for_each_entry(obj, &vm->active_list, mm_list)
 		i++;
 	error->active_bo_count = i;
 	list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
@@ -797,7 +799,7 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv,
 		error->active_bo_count =
 			capture_active_bo(error->active_bo,
 					  error->active_bo_count,
-					  &dev_priv->mm.active_list);
+					  &vm->active_list);
 
 	if (error->pinned_bo)
 		error->pinned_bo_count =