Diffstat (limited to 'drivers/gpu')
 drivers/gpu/drm/i915/i915_debugfs.c   | 23
 drivers/gpu/drm/i915/i915_drv.h       | 14
 drivers/gpu/drm/i915/i915_gem.c       | 47
 drivers/gpu/drm/i915/i915_gem_evict.c | 59
 drivers/gpu/drm/i915/i915_irq.c       | 11
 5 files changed, 58 insertions(+), 96 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d598070fb279..f9e3295f0457 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -41,8 +41,7 @@
 #if defined(CONFIG_DEBUG_FS)
 
 enum {
-	RENDER_LIST,
-	BSD_LIST,
+	ACTIVE_LIST,
 	FLUSHING_LIST,
 	INACTIVE_LIST,
 	PINNED_LIST,
@@ -125,6 +124,8 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
 		seq_printf(m, " (fence: %d)", obj->fence_reg);
 	if (obj->gtt_space != NULL)
 		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+	if (obj->ring != NULL)
+		seq_printf(m, " (%s)", obj->ring->name);
 }
 
 static int i915_gem_object_list_info(struct seq_file *m, void *data)
@@ -143,13 +144,9 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 		return ret;
 
 	switch (list) {
-	case RENDER_LIST:
-		seq_printf(m, "Render:\n");
-		head = &dev_priv->render_ring.active_list;
-		break;
-	case BSD_LIST:
-		seq_printf(m, "BSD:\n");
-		head = &dev_priv->bsd_ring.active_list;
+	case ACTIVE_LIST:
+		seq_printf(m, "Active:\n");
+		head = &dev_priv->mm.active_list;
 		break;
 	case INACTIVE_LIST:
 		seq_printf(m, "Inactive:\n");
@@ -173,7 +170,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	}
 
 	total_obj_size = total_gtt_size = count = 0;
-	list_for_each_entry(obj_priv, head, list) {
+	list_for_each_entry(obj_priv, head, mm_list) {
 		seq_printf(m, " ");
 		describe_obj(m, obj_priv);
 		seq_printf(m, "\n");
@@ -460,8 +457,7 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
 	if (ret)
 		return ret;
 
-	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
-			    list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 		obj = &obj_priv->base;
 		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
 			seq_printf(m, "--- gtt_offset = 0x%08x\n",
@@ -1020,8 +1016,7 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_capabilities", i915_capabilities, 0, 0},
 	{"i915_gem_objects", i915_gem_object_info, 0},
-	{"i915_gem_render_active", i915_gem_object_list_info, 0, (void *) RENDER_LIST},
-	{"i915_gem_bsd_active", i915_gem_object_list_info, 0, (void *) BSD_LIST},
+	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
 	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
 	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 84e33aeececd..817d8be6ff49 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -548,6 +548,17 @@ typedef struct drm_i915_private {
 		struct list_head shrink_list;
 
 		/**
+		 * List of objects currently involved in rendering.
+		 *
+		 * Includes buffers having the contents of their GPU caches
+		 * flushed, not necessarily primitives. last_rendering_seqno
+		 * represents when the rendering involved will be completed.
+		 *
+		 * A reference is held on the buffer while on this list.
+		 */
+		struct list_head active_list;
+
+		/**
 		 * List of objects which are not in the ringbuffer but which
 		 * still have a write_domain which needs to be flushed before
 		 * unbinding.
@@ -714,7 +725,8 @@ struct drm_i915_gem_object {
 	struct drm_mm_node *gtt_space;
 
 	/** This object's place on the active/flushing/inactive lists */
-	struct list_head list;
+	struct list_head ring_list;
+	struct list_head mm_list;
 	/** This object's place on GPU write list */
 	struct list_head gpu_write_list;
 	/** This object's place on eviction list */
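
For context on the i915_drv.h hunks above: each object now carries two embedded list heads, so it can sit simultaneously on the global mm.active_list (via mm_list) and on the active list of whichever ring it last executed on (via ring_list). Below is a minimal userspace C sketch of that dual-membership pattern, not part of the patch; the list type and helpers are hand-rolled stand-ins for <linux/list.h>, and all names are hypothetical.

/* Illustrative sketch: one object linked on two lists at once. */
#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct obj {
	int id;
	struct list_head mm_list;	/* link on the global active list */
	struct list_head ring_list;	/* link on one ring's active list */
};

int main(void)
{
	struct list_head mm_active, ring_active;
	struct obj a = { .id = 1 }, b = { .id = 2 };
	struct list_head *pos;

	INIT_LIST_HEAD(&mm_active);
	INIT_LIST_HEAD(&ring_active);

	/* Every active object is linked on the global list... */
	list_add_tail(&a.mm_list, &mm_active);
	list_add_tail(&b.mm_list, &mm_active);
	/* ...and, independently, on its ring's active list. */
	list_add_tail(&a.ring_list, &ring_active);

	/* One walk of the global list now sees objects from every ring. */
	for (pos = mm_active.next; pos != &mm_active; pos = pos->next)
		printf("obj %d is active\n",
		       container_of(pos, struct obj, mm_list)->id);
	return 0;
}
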
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 56153685d145..6e85496f9164 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1139,7 +1139,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 
 	/* Maintain LRU order of "inactive" objects */
 	if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
-		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
 
 	drm_gem_object_unreference(obj);
 unlock:
@@ -1271,7 +1271,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 	}
 
 	if (i915_gem_object_is_inactive(obj_priv))
-		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
 
 	pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
 		page_offset;
@@ -1565,6 +1565,7 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj,
 			    struct intel_ring_buffer *ring)
 {
 	struct drm_device *dev = obj->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 	uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
 
@@ -1578,7 +1579,8 @@ i915_gem_object_move_to_active(struct drm_gem_object *obj,
 	}
 
 	/* Move from whatever list we were on to the tail of execution. */
-	list_move_tail(&obj_priv->list, &ring->active_list);
+	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
+	list_move_tail(&obj_priv->ring_list, &ring->active_list);
 	obj_priv->last_rendering_seqno = seqno;
 }
 
@@ -1590,7 +1592,8 @@ i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	BUG_ON(!obj_priv->active);
-	list_move_tail(&obj_priv->list, &dev_priv->mm.flushing_list);
+	list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
+	list_del_init(&obj_priv->ring_list);
 	obj_priv->last_rendering_seqno = 0;
 }
 
@@ -1629,9 +1632,10 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
 	struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 
 	if (obj_priv->pin_count != 0)
-		list_move_tail(&obj_priv->list, &dev_priv->mm.pinned_list);
+		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
 	else
-		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+		list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+	list_del_init(&obj_priv->ring_list);
 
 	BUG_ON(!list_empty(&obj_priv->gpu_write_list));
 
@@ -1780,7 +1784,7 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 
 		obj_priv = list_first_entry(&ring->active_list,
 					    struct drm_i915_gem_object,
-					    list);
+					    ring_list);
 
 		obj_priv->base.write_domain = 0;
 		list_del_init(&obj_priv->gpu_write_list);
@@ -1804,7 +1808,7 @@ void i915_gem_reset(struct drm_device *dev)
 	while (!list_empty(&dev_priv->mm.flushing_list)) {
 		obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
 					    struct drm_i915_gem_object,
-					    list);
+					    mm_list);
 
 		obj_priv->base.write_domain = 0;
 		list_del_init(&obj_priv->gpu_write_list);
@@ -1816,7 +1820,7 @@ void i915_gem_reset(struct drm_device *dev)
 	 */
 	list_for_each_entry(obj_priv,
 			    &dev_priv->mm.inactive_list,
-			    list)
+			    mm_list)
 	{
 		obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
 	}
@@ -1876,7 +1880,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
 
 		obj_priv = list_first_entry(&ring->active_list,
 					    struct drm_i915_gem_object,
-					    list);
+					    ring_list);
 
 		if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
 			break;
@@ -1912,7 +1916,7 @@ i915_gem_retire_requests(struct drm_device *dev)
 		 */
 		list_for_each_entry_safe(obj_priv, tmp,
 					 &dev_priv->mm.deferred_free_list,
-					 list)
+					 mm_list)
 			i915_gem_free_object_tail(&obj_priv->base);
 	}
 
@@ -2145,7 +2149,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 	BUG_ON(obj_priv->pages_refcount);
 
 	i915_gem_info_remove_gtt(dev_priv, obj->size);
-	list_del_init(&obj_priv->list);
+	list_del_init(&obj_priv->mm_list);
 
 	drm_mm_put_block(obj_priv->gtt_space);
 	obj_priv->gtt_space = NULL;
@@ -2700,7 +2704,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
 	}
 
 	/* keep track of bounds object by adding it to the inactive list */
-	list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+	list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
 	i915_gem_info_add_gtt(dev_priv, obj->size);
 
 	/* Assert that the object is not currently in any GPU domain. As it
@@ -4022,7 +4026,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
 	if (obj_priv->pin_count == 1) {
 		i915_gem_info_add_pin(dev_priv, obj->size);
 		if (!obj_priv->active)
-			list_move_tail(&obj_priv->list,
+			list_move_tail(&obj_priv->mm_list,
 				       &dev_priv->mm.pinned_list);
 	}
 
@@ -4048,7 +4052,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
 	 */
 	if (obj_priv->pin_count == 0) {
 		if (!obj_priv->active)
-			list_move_tail(&obj_priv->list,
+			list_move_tail(&obj_priv->mm_list,
 				       &dev_priv->mm.inactive_list);
 		i915_gem_info_remove_pin(dev_priv, obj->size);
 	}
@@ -4280,7 +4284,8 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
 	obj->agp_type = AGP_USER_MEMORY;
 	obj->base.driver_private = NULL;
 	obj->fence_reg = I915_FENCE_REG_NONE;
-	INIT_LIST_HEAD(&obj->list);
+	INIT_LIST_HEAD(&obj->mm_list);
+	INIT_LIST_HEAD(&obj->ring_list);
 	INIT_LIST_HEAD(&obj->gpu_write_list);
 	obj->madv = I915_MADV_WILLNEED;
 
@@ -4303,7 +4308,7 @@ static void i915_gem_free_object_tail(struct drm_gem_object *obj)
 
 	ret = i915_gem_object_unbind(obj);
 	if (ret == -ERESTARTSYS) {
-		list_move(&obj_priv->list,
+		list_move(&obj_priv->mm_list,
 			  &dev_priv->mm.deferred_free_list);
 		return;
 	}
@@ -4511,6 +4516,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 		return ret;
 	}
 
+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
 	BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
 	BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
 	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
@@ -4564,6 +4570,7 @@ i915_gem_load(struct drm_device *dev)
 	int i;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
+	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
@@ -4859,7 +4866,7 @@ i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 	if (mutex_trylock(&dev->struct_mutex)) {
 		list_for_each_entry(obj_priv,
 				    &dev_priv->mm.inactive_list,
-				    list)
+				    mm_list)
 			cnt++;
 		mutex_unlock(&dev->struct_mutex);
 	}
@@ -4885,7 +4892,7 @@ rescan:
 
 		list_for_each_entry_safe(obj_priv, next_obj,
 					 &dev_priv->mm.inactive_list,
-					 list) {
+					 mm_list) {
 			if (i915_gem_object_is_purgeable(obj_priv)) {
 				i915_gem_object_unbind(&obj_priv->base);
 				if (--nr_to_scan <= 0)
@@ -4914,7 +4921,7 @@ rescan:
 
 	list_for_each_entry_safe(obj_priv, next_obj,
 				 &dev_priv->mm.inactive_list,
-				 list) {
+				 mm_list) {
 		if (nr_to_scan > 0) {
 			i915_gem_object_unbind(&obj_priv->base);
 			nr_to_scan--;
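
Taken together, the i915_gem.c hunks above implement a simple lifecycle: move_to_active links an object onto both the global and per-ring active lists, while move_to_flushing and move_to_inactive keep it on a global list but unlink it from its ring. A compact, compilable sketch of just that bookkeeping follows; the list helpers are minimal stand-ins for the <linux/list.h> ones of the same names, and the struct names are illustrative, not part of the patch.

#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_del_init(struct list_head *e)
{
	list_del(e);
	INIT_LIST_HEAD(e);
}

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static void list_move_tail(struct list_head *e, struct list_head *head)
{
	list_del(e);
	list_add_tail(e, head);
}

struct obj { struct list_head mm_list, ring_list; };
struct mm  { struct list_head active_list, flushing_list, inactive_list; };

/* Object becomes busy: tail of the global *and* the ring active lists. */
static void move_to_active(struct obj *o, struct mm *mm,
			   struct list_head *ring_active)
{
	list_move_tail(&o->mm_list, &mm->active_list);
	list_move_tail(&o->ring_list, ring_active);
}

/* Retired with a dirty write domain: global flushing list, off the ring. */
static void move_to_flushing(struct obj *o, struct mm *mm)
{
	list_move_tail(&o->mm_list, &mm->flushing_list);
	list_del_init(&o->ring_list);
}

/* Fully idle: global inactive list, off the ring. */
static void move_to_inactive(struct obj *o, struct mm *mm)
{
	list_move_tail(&o->mm_list, &mm->inactive_list);
	list_del_init(&o->ring_list);
}

int main(void)
{
	struct mm mm;
	struct list_head ring_active;
	struct obj o;

	INIT_LIST_HEAD(&mm.active_list);
	INIT_LIST_HEAD(&mm.flushing_list);
	INIT_LIST_HEAD(&mm.inactive_list);
	INIT_LIST_HEAD(&ring_active);
	INIT_LIST_HEAD(&o.mm_list);
	INIT_LIST_HEAD(&o.ring_list);

	move_to_active(&o, &mm, &ring_active);	/* busy: on both lists */
	move_to_flushing(&o, &mm);		/* retired dirty: global only */
	move_to_inactive(&o, &mm);		/* idle: global inactive only */
	return 0;
}
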
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d2733a1e2bcc..70db2f1ee369 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -31,49 +31,6 @@
 #include "i915_drv.h"
 #include "i915_drm.h"
 
-static struct drm_i915_gem_object *
-i915_gem_next_active_object(struct drm_device *dev,
-			    struct list_head **render_iter,
-			    struct list_head **bsd_iter)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
-
-	if (*render_iter != &dev_priv->render_ring.active_list)
-		render_obj = list_entry(*render_iter,
-					struct drm_i915_gem_object,
-					list);
-
-	if (HAS_BSD(dev)) {
-		if (*bsd_iter != &dev_priv->bsd_ring.active_list)
-			bsd_obj = list_entry(*bsd_iter,
-					     struct drm_i915_gem_object,
-					     list);
-
-		if (render_obj == NULL) {
-			*bsd_iter = (*bsd_iter)->next;
-			return bsd_obj;
-		}
-
-		if (bsd_obj == NULL) {
-			*render_iter = (*render_iter)->next;
-			return render_obj;
-		}
-
-		/* XXX can we handle seqno wrapping? */
-		if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
-			*render_iter = (*render_iter)->next;
-			return render_obj;
-		} else {
-			*bsd_iter = (*bsd_iter)->next;
-			return bsd_obj;
-		}
-	} else {
-		*render_iter = (*render_iter)->next;
-		return render_obj;
-	}
-}
-
 static bool
 mark_free(struct drm_i915_gem_object *obj_priv,
 	  struct list_head *unwind)
@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv,
 	return drm_mm_scan_add_block(obj_priv->gtt_space);
 }
 
-#define i915_for_each_active_object(OBJ, R, B) \
-	*(R) = dev_priv->render_ring.active_list.next; \
-	*(B) = dev_priv->bsd_ring.active_list.next; \
-	while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
-
 int
 i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
 	struct drm_i915_gem_object *obj_priv;
-	struct list_head *render_iter, *bsd_iter;
 	int ret = 0;
 
 	i915_gem_retire_requests(dev);
@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
 
 	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
 		if (mark_free(obj_priv, &unwind_list))
 			goto found;
 	}
 
 	/* Now merge in the soon-to-be-expired objects... */
-	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 		/* Does the object require an outstanding flush? */
 		if (obj_priv->base.write_domain || obj_priv->pin_count)
 			continue;
@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 	}
 
 	/* Finally add anything with a pending flush (in order of retirement) */
-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
 		if (obj_priv->pin_count)
 			continue;
 
 		if (mark_free(obj_priv, &unwind_list))
 			goto found;
 	}
-	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 		if (! obj_priv->base.write_domain || obj_priv->pin_count)
 			continue;
 
@@ -251,7 +202,7 @@ i915_gem_evict_inactive(struct drm_device *dev)
 
 		obj = &list_first_entry(&dev_priv->mm.inactive_list,
 					struct drm_i915_gem_object,
-					list)->base;
+					mm_list)->base;
 
 		ret = i915_gem_object_unbind(obj);
 		if (ret != 0) {
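
The i915_gem_evict.c hunks above can delete the seqno-merging iterator entirely because every active object is now also on mm.active_list, which i915_gem_object_move_to_active maintains in submission order via list_move_tail across all rings; a plain front-to-back walk therefore visits active objects roughly oldest-first, and the "XXX can we handle seqno wrapping?" question disappears with the merge code. For reference, a simplified (illustrative, GNU C) form of the list_for_each_entry walk the new code relies on; the real macro lives in <linux/list.h>:

/* Needs <stddef.h> for offsetof; typeof is a GNU C extension. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				  \
	for (pos = container_of((head)->next, typeof(*pos), member);	  \
	     &pos->member != (head);					  \
	     pos = container_of(pos->member.next, typeof(*pos), member))
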
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 1e30c250140b..f94cd7ffd74d 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -608,9 +608,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	batchbuffer[0] = NULL;
 	batchbuffer[1] = NULL;
 	count = 0;
-	list_for_each_entry(obj_priv,
-			    &dev_priv->render_ring.active_list, list) {
-
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 		struct drm_gem_object *obj = &obj_priv->base;
 
 		if (batchbuffer[0] == NULL &&
@@ -627,7 +625,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	}
 	/* Scan the other lists for completeness for those bizarre errors. */
 	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
 			struct drm_gem_object *obj = &obj_priv->base;
 
 			if (batchbuffer[0] == NULL &&
@@ -645,7 +643,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 		}
 	}
 	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
 			struct drm_gem_object *obj = &obj_priv->base;
 
 			if (batchbuffer[0] == NULL &&
@@ -686,8 +684,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 
 	if (error->active_bo) {
 		int i = 0;
-		list_for_each_entry(obj_priv,
-				    &dev_priv->render_ring.active_list, list) {
+		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 			struct drm_gem_object *obj = &obj_priv->base;
 
 			error->active_bo[i].size = obj->size;