diff options
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c        | 54
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h            |  5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c            | 28
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c    |  3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c      | 14
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_stolen.c     |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c      | 37
8 files changed, 86 insertions, 59 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 5d52a23d5662..a1f4c91fb112 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c | |||
@@ -149,7 +149,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
149 | struct drm_device *dev = node->minor->dev; | 149 | struct drm_device *dev = node->minor->dev; |
150 | struct drm_i915_private *dev_priv = dev->dev_private; | 150 | struct drm_i915_private *dev_priv = dev->dev_private; |
151 | struct i915_address_space *vm = &dev_priv->gtt.base; | 151 | struct i915_address_space *vm = &dev_priv->gtt.base; |
152 | struct drm_i915_gem_object *obj; | 152 | struct i915_vma *vma; |
153 | size_t total_obj_size, total_gtt_size; | 153 | size_t total_obj_size, total_gtt_size; |
154 | int count, ret; | 154 | int count, ret; |
155 | 155 | ||
@@ -157,6 +157,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
157 | if (ret) | 157 | if (ret) |
158 | return ret; | 158 | return ret; |
159 | 159 | ||
160 | /* FIXME: the user of this interface might want more than just GGTT */ | ||
160 | switch (list) { | 161 | switch (list) { |
161 | case ACTIVE_LIST: | 162 | case ACTIVE_LIST: |
162 | seq_puts(m, "Active:\n"); | 163 | seq_puts(m, "Active:\n"); |
@@ -172,12 +173,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data) | |||
172 | } | 173 | } |
173 | 174 | ||
174 | total_obj_size = total_gtt_size = count = 0; | 175 | total_obj_size = total_gtt_size = count = 0; |
175 | list_for_each_entry(obj, head, mm_list) { | 176 | list_for_each_entry(vma, head, mm_list) { |
176 | seq_puts(m, " "); | 177 | seq_printf(m, " "); |
177 | describe_obj(m, obj); | 178 | describe_obj(m, vma->obj); |
178 | seq_putc(m, '\n'); | 179 | seq_printf(m, "\n"); |
179 | total_obj_size += obj->base.size; | 180 | total_obj_size += vma->obj->base.size; |
180 | total_gtt_size += i915_gem_obj_ggtt_size(obj); | 181 | total_gtt_size += vma->node.size; |
181 | count++; | 182 | count++; |
182 | } | 183 | } |
183 | mutex_unlock(&dev->struct_mutex); | 184 | mutex_unlock(&dev->struct_mutex); |
@@ -224,7 +225,18 @@ static int per_file_stats(int id, void *ptr, void *data) | |||
224 | return 0; | 225 | return 0; |
225 | } | 226 | } |
226 | 227 | ||
227 | static int i915_gem_object_info(struct seq_file *m, void *data) | 228 | #define count_vmas(list, member) do { \ |
229 | list_for_each_entry(vma, list, member) { \ | ||
230 | size += i915_gem_obj_ggtt_size(vma->obj); \ | ||
231 | ++count; \ | ||
232 | if (vma->obj->map_and_fenceable) { \ | ||
233 | mappable_size += i915_gem_obj_ggtt_size(vma->obj); \ | ||
234 | ++mappable_count; \ | ||
235 | } \ | ||
236 | } \ | ||
237 | } while (0) | ||
238 | |||
239 | static int i915_gem_object_info(struct seq_file *m, void* data) | ||
228 | { | 240 | { |
229 | struct drm_info_node *node = (struct drm_info_node *) m->private; | 241 | struct drm_info_node *node = (struct drm_info_node *) m->private; |
230 | struct drm_device *dev = node->minor->dev; | 242 | struct drm_device *dev = node->minor->dev; |
@@ -234,6 +246,7 @@ static int i915_gem_object_info(struct seq_file *m, void *data) | |||
234 | struct drm_i915_gem_object *obj; | 246 | struct drm_i915_gem_object *obj; |
235 | struct i915_address_space *vm = &dev_priv->gtt.base; | 247 | struct i915_address_space *vm = &dev_priv->gtt.base; |
236 | struct drm_file *file; | 248 | struct drm_file *file; |
249 | struct i915_vma *vma; | ||
237 | int ret; | 250 | int ret; |
238 | 251 | ||
239 | ret = mutex_lock_interruptible(&dev->struct_mutex); | 252 | ret = mutex_lock_interruptible(&dev->struct_mutex); |
@@ -250,12 +263,12 @@ static int i915_gem_object_info(struct seq_file *m, void *data) | |||
250 | count, mappable_count, size, mappable_size); | 263 | count, mappable_count, size, mappable_size); |
251 | 264 | ||
252 | size = count = mappable_size = mappable_count = 0; | 265 | size = count = mappable_size = mappable_count = 0; |
253 | count_objects(&vm->active_list, mm_list); | 266 | count_vmas(&vm->active_list, mm_list); |
254 | seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", | 267 | seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n", |
255 | count, mappable_count, size, mappable_size); | 268 | count, mappable_count, size, mappable_size); |
256 | 269 | ||
257 | size = count = mappable_size = mappable_count = 0; | 270 | size = count = mappable_size = mappable_count = 0; |
258 | count_objects(&vm->inactive_list, mm_list); | 271 | count_vmas(&vm->inactive_list, mm_list); |
259 | seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", | 272 | seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n", |
260 | count, mappable_count, size, mappable_size); | 273 | count, mappable_count, size, mappable_size); |
261 | 274 | ||
@@ -1774,7 +1787,8 @@ i915_drop_caches_set(void *data, u64 val) | |||
1774 | struct drm_device *dev = data; | 1787 | struct drm_device *dev = data; |
1775 | struct drm_i915_private *dev_priv = dev->dev_private; | 1788 | struct drm_i915_private *dev_priv = dev->dev_private; |
1776 | struct drm_i915_gem_object *obj, *next; | 1789 | struct drm_i915_gem_object *obj, *next; |
1777 | struct i915_address_space *vm = &dev_priv->gtt.base; | 1790 | struct i915_address_space *vm; |
1791 | struct i915_vma *vma, *x; | ||
1778 | int ret; | 1792 | int ret; |
1779 | 1793 | ||
1780 | DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); | 1794 | DRM_DEBUG_DRIVER("Dropping caches: 0x%08llx\n", val); |
@@ -1795,14 +1809,16 @@ i915_drop_caches_set(void *data, u64 val) | |||
1795 | i915_gem_retire_requests(dev); | 1809 | i915_gem_retire_requests(dev); |
1796 | 1810 | ||
1797 | if (val & DROP_BOUND) { | 1811 | if (val & DROP_BOUND) { |
1798 | list_for_each_entry_safe(obj, next, &vm->inactive_list, | 1812 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) { |
1799 | mm_list) { | 1813 | list_for_each_entry_safe(vma, x, &vm->inactive_list, |
1800 | if (obj->pin_count) | 1814 | mm_list) { |
1801 | continue; | 1815 | if (vma->obj->pin_count) |
1802 | 1816 | continue; | |
1803 | ret = i915_gem_object_ggtt_unbind(obj); | 1817 | |
1804 | if (ret) | 1818 | ret = i915_vma_unbind(vma); |
1805 | goto unlock; | 1819 | if (ret) |
1820 | goto unlock; | ||
1821 | } | ||
1806 | } | 1822 | } |
1807 | } | 1823 | } |
1808 | 1824 | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cb4521d95429..20becc5500bd 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -558,6 +558,9 @@ struct i915_vma { | |||
558 | struct drm_i915_gem_object *obj; | 558 | struct drm_i915_gem_object *obj; |
559 | struct i915_address_space *vm; | 559 | struct i915_address_space *vm; |
560 | 560 | ||
561 | /** This object's place on the active/inactive lists */ | ||
562 | struct list_head mm_list; | ||
563 | |||
561 | struct list_head vma_link; /* Link in the object's VMA list */ | 564 | struct list_head vma_link; /* Link in the object's VMA list */ |
562 | }; | 565 | }; |
563 | 566 | ||
@@ -1299,9 +1302,7 @@ struct drm_i915_gem_object { | |||
1299 | struct drm_mm_node *stolen; | 1302 | struct drm_mm_node *stolen; |
1300 | struct list_head global_list; | 1303 | struct list_head global_list; |
1301 | 1304 | ||
1302 | /** This object's place on the active/inactive lists */ | ||
1303 | struct list_head ring_list; | 1305 | struct list_head ring_list; |
1304 | struct list_head mm_list; | ||
1305 | /** This object's place in the batchbuffer or on the eviction list */ | 1306 | /** This object's place in the batchbuffer or on the eviction list */ |
1306 | struct list_head exec_list; | 1307 | struct list_head exec_list; |
1307 | 1308 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 5eacc497f179..985a13035550 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1886,7 +1886,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | |||
1886 | { | 1886 | { |
1887 | struct drm_device *dev = obj->base.dev; | 1887 | struct drm_device *dev = obj->base.dev; |
1888 | struct drm_i915_private *dev_priv = dev->dev_private; | 1888 | struct drm_i915_private *dev_priv = dev->dev_private; |
1889 | struct i915_address_space *vm = &dev_priv->gtt.base; | ||
1890 | u32 seqno = intel_ring_get_seqno(ring); | 1889 | u32 seqno = intel_ring_get_seqno(ring); |
1891 | 1890 | ||
1892 | BUG_ON(ring == NULL); | 1891 | BUG_ON(ring == NULL); |
@@ -1902,8 +1901,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | |||
1902 | obj->active = 1; | 1901 | obj->active = 1; |
1903 | } | 1902 | } |
1904 | 1903 | ||
1905 | /* Move from whatever list we were on to the tail of execution. */ | ||
1906 | list_move_tail(&obj->mm_list, &vm->active_list); | ||
1907 | list_move_tail(&obj->ring_list, &ring->active_list); | 1904 | list_move_tail(&obj->ring_list, &ring->active_list); |
1908 | 1905 | ||
1909 | obj->last_read_seqno = seqno; | 1906 | obj->last_read_seqno = seqno; |
@@ -1925,14 +1922,14 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | |||
1925 | static void | 1922 | static void |
1926 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | 1923 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) |
1927 | { | 1924 | { |
1928 | struct drm_device *dev = obj->base.dev; | 1925 | struct drm_i915_private *dev_priv = obj->base.dev->dev_private; |
1929 | struct drm_i915_private *dev_priv = dev->dev_private; | 1926 | struct i915_address_space *ggtt_vm = &dev_priv->gtt.base; |
1930 | struct i915_address_space *vm = &dev_priv->gtt.base; | 1927 | struct i915_vma *vma = i915_gem_obj_to_vma(obj, ggtt_vm); |
1931 | 1928 | ||
1932 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); | 1929 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); |
1933 | BUG_ON(!obj->active); | 1930 | BUG_ON(!obj->active); |
1934 | 1931 | ||
1935 | list_move_tail(&obj->mm_list, &vm->inactive_list); | 1932 | list_move_tail(&vma->mm_list, &ggtt_vm->inactive_list); |
1936 | 1933 | ||
1937 | list_del_init(&obj->ring_list); | 1934 | list_del_init(&obj->ring_list); |
1938 | obj->ring = NULL; | 1935 | obj->ring = NULL; |
@@ -2640,7 +2637,7 @@ int i915_vma_unbind(struct i915_vma *vma) | |||
2640 | i915_gem_gtt_finish_object(obj); | 2637 | i915_gem_gtt_finish_object(obj); |
2641 | i915_gem_object_unpin_pages(obj); | 2638 | i915_gem_object_unpin_pages(obj); |
2642 | 2639 | ||
2643 | list_del(&obj->mm_list); | 2640 | list_del(&vma->mm_list); |
2644 | /* Avoid an unnecessary call to unbind on rebind. */ | 2641 | /* Avoid an unnecessary call to unbind on rebind. */ |
2645 | if (i915_is_ggtt(vma->vm)) | 2642 | if (i915_is_ggtt(vma->vm)) |
2646 | obj->map_and_fenceable = true; | 2643 | obj->map_and_fenceable = true; |
@@ -3187,7 +3184,7 @@ search_free: | |||
3187 | goto err_remove_node; | 3184 | goto err_remove_node; |
3188 | 3185 | ||
3189 | list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); | 3186 | list_move_tail(&obj->global_list, &dev_priv->mm.bound_list); |
3190 | list_add_tail(&obj->mm_list, &vm->inactive_list); | 3187 | list_add_tail(&vma->mm_list, &vm->inactive_list); |
3191 | 3188 | ||
3192 | /* Keep GGTT vmas first to make debug easier */ | 3189 | /* Keep GGTT vmas first to make debug easier */ |
3193 | if (i915_is_ggtt(vm)) | 3190 | if (i915_is_ggtt(vm)) |
@@ -3352,9 +3349,14 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write) | |||
3352 | old_write_domain); | 3349 | old_write_domain); |
3353 | 3350 | ||
3354 | /* And bump the LRU for this access */ | 3351 | /* And bump the LRU for this access */ |
3355 | if (i915_gem_object_is_inactive(obj)) | 3352 | if (i915_gem_object_is_inactive(obj)) { |
3356 | list_move_tail(&obj->mm_list, | 3353 | struct i915_vma *vma = i915_gem_obj_to_vma(obj, |
3357 | &dev_priv->gtt.base.inactive_list); | 3354 | &dev_priv->gtt.base); |
3355 | if (vma) | ||
3356 | list_move_tail(&vma->mm_list, | ||
3357 | &dev_priv->gtt.base.inactive_list); | ||
3358 | |||
3359 | } | ||
3358 | 3360 | ||
3359 | return 0; | 3361 | return 0; |
3360 | } | 3362 | } |
@@ -3927,7 +3929,6 @@ unlock: | |||
3927 | void i915_gem_object_init(struct drm_i915_gem_object *obj, | 3929 | void i915_gem_object_init(struct drm_i915_gem_object *obj, |
3928 | const struct drm_i915_gem_object_ops *ops) | 3930 | const struct drm_i915_gem_object_ops *ops) |
3929 | { | 3931 | { |
3930 | INIT_LIST_HEAD(&obj->mm_list); | ||
3931 | INIT_LIST_HEAD(&obj->global_list); | 3932 | INIT_LIST_HEAD(&obj->global_list); |
3932 | INIT_LIST_HEAD(&obj->ring_list); | 3933 | INIT_LIST_HEAD(&obj->ring_list); |
3933 | INIT_LIST_HEAD(&obj->exec_list); | 3934 | INIT_LIST_HEAD(&obj->exec_list); |
@@ -4069,6 +4070,7 @@ struct i915_vma *i915_gem_vma_create(struct drm_i915_gem_object *obj, | |||
4069 | return ERR_PTR(-ENOMEM); | 4070 | return ERR_PTR(-ENOMEM); |
4070 | 4071 | ||
4071 | INIT_LIST_HEAD(&vma->vma_link); | 4072 | INIT_LIST_HEAD(&vma->vma_link); |
4073 | INIT_LIST_HEAD(&vma->mm_list); | ||
4072 | vma->vm = vm; | 4074 | vma->vm = vm; |
4073 | vma->obj = obj; | 4075 | vma->obj = obj; |
4074 | 4076 | ||
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c index 7273a729a039..403309c2a7d6 100644 --- a/drivers/gpu/drm/i915/i915_gem_context.c +++ b/drivers/gpu/drm/i915/i915_gem_context.c | |||
@@ -436,7 +436,10 @@ static int do_switch(struct i915_hw_context *to) | |||
436 | * MI_SET_CONTEXT instead of when the next seqno has completed. | 436 | * MI_SET_CONTEXT instead of when the next seqno has completed. |
437 | */ | 437 | */ |
438 | if (from != NULL) { | 438 | if (from != NULL) { |
439 | struct drm_i915_private *dev_priv = from->obj->base.dev->dev_private; | ||
440 | struct i915_address_space *ggtt = &dev_priv->gtt.base; | ||
439 | from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; | 441 | from->obj->base.read_domains = I915_GEM_DOMAIN_INSTRUCTION; |
442 | list_move_tail(&i915_gem_obj_to_vma(from->obj, ggtt)->mm_list, &ggtt->active_list); | ||
440 | i915_gem_object_move_to_active(from->obj, ring); | 443 | i915_gem_object_move_to_active(from->obj, ring); |
441 | /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the | 444 | /* As long as MI_SET_CONTEXT is serializing, ie. it flushes the |
442 | * whole damn pipeline, we don't need to explicitly mark the | 445 | * whole damn pipeline, we don't need to explicitly mark the |
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 61bf5e20e5e0..425939b7d343 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c | |||
@@ -87,8 +87,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, | |||
87 | drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); | 87 | drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level); |
88 | 88 | ||
89 | /* First see if there is a large enough contiguous idle region... */ | 89 | /* First see if there is a large enough contiguous idle region... */ |
90 | list_for_each_entry(obj, &vm->inactive_list, mm_list) { | 90 | list_for_each_entry(vma, &vm->inactive_list, mm_list) { |
91 | struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm); | ||
92 | if (mark_free(vma, &unwind_list)) | 91 | if (mark_free(vma, &unwind_list)) |
93 | goto found; | 92 | goto found; |
94 | } | 93 | } |
@@ -97,8 +96,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm, | |||
97 | goto none; | 96 | goto none; |
98 | 97 | ||
99 | /* Now merge in the soon-to-be-expired objects... */ | 98 | /* Now merge in the soon-to-be-expired objects... */ |
100 | list_for_each_entry(obj, &vm->active_list, mm_list) { | 99 | list_for_each_entry(vma, &vm->active_list, mm_list) { |
101 | struct i915_vma *vma = i915_gem_obj_to_vma(obj, vm); | ||
102 | if (mark_free(vma, &unwind_list)) | 100 | if (mark_free(vma, &unwind_list)) |
103 | goto found; | 101 | goto found; |
104 | } | 102 | } |
@@ -159,7 +157,7 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
159 | { | 157 | { |
160 | drm_i915_private_t *dev_priv = dev->dev_private; | 158 | drm_i915_private_t *dev_priv = dev->dev_private; |
161 | struct i915_address_space *vm; | 159 | struct i915_address_space *vm; |
162 | struct drm_i915_gem_object *obj, *next; | 160 | struct i915_vma *vma, *next; |
163 | bool lists_empty = true; | 161 | bool lists_empty = true; |
164 | int ret; | 162 | int ret; |
165 | 163 | ||
@@ -187,9 +185,9 @@ i915_gem_evict_everything(struct drm_device *dev) | |||
187 | 185 | ||
188 | /* Having flushed everything, unbind() should never raise an error */ | 186 | /* Having flushed everything, unbind() should never raise an error */ |
189 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) { | 187 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) { |
190 | list_for_each_entry_safe(obj, next, &vm->inactive_list, mm_list) | 188 | list_for_each_entry_safe(vma, next, &vm->inactive_list, mm_list) |
191 | if (obj->pin_count == 0) | 189 | if (vma->obj->pin_count == 0) |
192 | WARN_ON(i915_vma_unbind(i915_gem_obj_to_vma(obj, vm))); | 190 | WARN_ON(i915_vma_unbind(vma)); |
193 | } | 191 | } |
194 | 192 | ||
195 | return 0; | 193 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c index aa3fa9425cae..8ccc29ac9629 100644 --- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c | |||
@@ -801,6 +801,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects, | |||
801 | obj->base.read_domains = obj->base.pending_read_domains; | 801 | obj->base.read_domains = obj->base.pending_read_domains; |
802 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; | 802 | obj->fenced_gpu_access = obj->pending_fenced_gpu_access; |
803 | 803 | ||
804 | /* FIXME: This lookup gets fixed later <-- danvet */ | ||
805 | list_move_tail(&i915_gem_obj_to_vma(obj, vm)->mm_list, &vm->active_list); | ||
804 | i915_gem_object_move_to_active(obj, ring); | 806 | i915_gem_object_move_to_active(obj, ring); |
805 | if (obj->base.write_domain) { | 807 | if (obj->base.write_domain) { |
806 | obj->dirty = 1; | 808 | obj->dirty = 1; |
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 934840860c6d..e68c4b5da46d 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c | |||
@@ -401,7 +401,7 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev, | |||
401 | obj->has_global_gtt_mapping = 1; | 401 | obj->has_global_gtt_mapping = 1; |
402 | 402 | ||
403 | list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); | 403 | list_add_tail(&obj->global_list, &dev_priv->mm.bound_list); |
404 | list_add_tail(&obj->mm_list, &ggtt->inactive_list); | 404 | list_add_tail(&vma->mm_list, &ggtt->inactive_list); |
405 | 405 | ||
406 | return obj; | 406 | return obj; |
407 | 407 | ||
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 8091485e7e88..fad48b2bb870 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c | |||
@@ -556,11 +556,11 @@ static void capture_bo(struct drm_i915_error_buffer *err, | |||
556 | static u32 capture_active_bo(struct drm_i915_error_buffer *err, | 556 | static u32 capture_active_bo(struct drm_i915_error_buffer *err, |
557 | int count, struct list_head *head) | 557 | int count, struct list_head *head) |
558 | { | 558 | { |
559 | struct drm_i915_gem_object *obj; | 559 | struct i915_vma *vma; |
560 | int i = 0; | 560 | int i = 0; |
561 | 561 | ||
562 | list_for_each_entry(obj, head, mm_list) { | 562 | list_for_each_entry(vma, head, mm_list) { |
563 | capture_bo(err++, obj); | 563 | capture_bo(err++, vma->obj); |
564 | if (++i == count) | 564 | if (++i == count) |
565 | break; | 565 | break; |
566 | } | 566 | } |
@@ -622,7 +622,8 @@ static struct drm_i915_error_object * | |||
622 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | 622 | i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, |
623 | struct intel_ring_buffer *ring) | 623 | struct intel_ring_buffer *ring) |
624 | { | 624 | { |
625 | struct i915_address_space *vm = &dev_priv->gtt.base; | 625 | struct i915_address_space *vm; |
626 | struct i915_vma *vma; | ||
626 | struct drm_i915_gem_object *obj; | 627 | struct drm_i915_gem_object *obj; |
627 | u32 seqno; | 628 | u32 seqno; |
628 | 629 | ||
@@ -642,20 +643,23 @@ i915_error_first_batchbuffer(struct drm_i915_private *dev_priv, | |||
642 | } | 643 | } |
643 | 644 | ||
644 | seqno = ring->get_seqno(ring, false); | 645 | seqno = ring->get_seqno(ring, false); |
645 | list_for_each_entry(obj, &vm->active_list, mm_list) { | 646 | list_for_each_entry(vm, &dev_priv->vm_list, global_link) { |
646 | if (obj->ring != ring) | 647 | list_for_each_entry(vma, &vm->active_list, mm_list) { |
647 | continue; | 648 | obj = vma->obj; |
649 | if (obj->ring != ring) | ||
650 | continue; | ||
648 | 651 | ||
649 | if (i915_seqno_passed(seqno, obj->last_read_seqno)) | 652 | if (i915_seqno_passed(seqno, obj->last_read_seqno)) |
650 | continue; | 653 | continue; |
651 | 654 | ||
652 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) | 655 | if ((obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) == 0) |
653 | continue; | 656 | continue; |
654 | 657 | ||
655 | /* We need to copy these to an anonymous buffer as the simplest | 658 | /* We need to copy these to an anonymous buffer as the simplest |
656 | * method to avoid being overwritten by userspace. | 659 | * method to avoid being overwritten by userspace. |
657 | */ | 660 | */ |
658 | return i915_error_object_create(dev_priv, obj); | 661 | return i915_error_object_create(dev_priv, obj); |
662 | } | ||
659 | } | 663 | } |
660 | 664 | ||
661 | return NULL; | 665 | return NULL; |
@@ -775,11 +779,12 @@ static void i915_gem_capture_buffers(struct drm_i915_private *dev_priv, | |||
775 | struct drm_i915_error_state *error) | 779 | struct drm_i915_error_state *error) |
776 | { | 780 | { |
777 | struct i915_address_space *vm = &dev_priv->gtt.base; | 781 | struct i915_address_space *vm = &dev_priv->gtt.base; |
782 | struct i915_vma *vma; | ||
778 | struct drm_i915_gem_object *obj; | 783 | struct drm_i915_gem_object *obj; |
779 | int i; | 784 | int i; |
780 | 785 | ||
781 | i = 0; | 786 | i = 0; |
782 | list_for_each_entry(obj, &vm->active_list, mm_list) | 787 | list_for_each_entry(vma, &vm->active_list, mm_list) |
783 | i++; | 788 | i++; |
784 | error->active_bo_count = i; | 789 | error->active_bo_count = i; |
785 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) | 790 | list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) |