author     Chris Wilson <chris@chris-wilson.co.uk>    2012-07-20 07:41:02 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2012-07-25 12:23:52 -0400
commit     65ce3027415d4dc9ee18ef0a135214b4fb76730b (patch)
tree       74b94b51e2aa3a124311f410f74d8d13c1161ad8 /drivers
parent     0201f1ecf4b81f08799b1fb9c8cdf1125b9b78a6 (diff)
drm/i915: Remove the defunct flushing list
As we guarantee to emit a flush before emitting the breadcrumb or
the next batchbuffer, there is no further need for the flushing list.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
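
The reasoning above is easiest to see in the retirement path the patch leaves behind: because a flush is guaranteed before each breadcrumb, an object whose last read seqno has been passed can move straight from the active list to the inactive list, clearing its write domain on the spot instead of being parked on a flushing list. The snippet below is a minimal userspace sketch of that single-step bookkeeping, not driver code; the types and names (`toy_obj`, `retire`) are invented for illustration, and the plain seqno comparison stands in for the driver's wraparound-safe i915_seqno_passed().

```c
/* Toy model of the simplified bookkeeping after the flushing list is gone:
 * retirement moves an object directly from "active" to "inactive" and drops
 * its write domain there. All names and types here are illustrative only.
 */
#include <stdio.h>
#include <stdbool.h>

struct toy_obj {
	const char *name;
	unsigned int last_read_seqno; /* seqno of the last GPU read */
	unsigned int write_domain;    /* pending GPU write domain, 0 if none */
	bool active;                  /* conceptually: on the active list */
};

/* Retire every object whose last read has completed. */
static void retire(struct toy_obj *objs, int n, unsigned int completed_seqno)
{
	for (int i = 0; i < n; i++) {
		struct toy_obj *obj = &objs[i];

		/* Plain comparison; the driver uses i915_seqno_passed()
		 * to cope with seqno wraparound. */
		if (!obj->active || obj->last_read_seqno > completed_seqno)
			continue;

		/* The flush already ran before the breadcrumb, so the write
		 * domain can simply be dropped here -- no flushing list. */
		obj->write_domain = 0;
		obj->last_read_seqno = 0;
		obj->active = false; /* i.e. move to the inactive list */
		printf("%s -> inactive\n", obj->name);
	}
}

int main(void)
{
	struct toy_obj objs[] = {
		{ .name = "batch",  .last_read_seqno = 10, .write_domain = 0, .active = true },
		{ .name = "render", .last_read_seqno = 12, .write_domain = 1, .active = true },
	};

	retire(objs, 2, 11); /* only "batch" has completed */
	retire(objs, 2, 12); /* now "render" retires too, write domain and all */
	return 0;
}
```

Compare this with the new body of i915_gem_object_move_to_inactive() in the diff below, which clears obj->base.write_domain directly instead of calling the removed i915_gem_object_move_to_flushing().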
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c   |  7
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h       | 19
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c       | 59
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c | 20
4 files changed, 14 insertions, 91 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index a8b7db6161ca..1312b79c70b3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -44,7 +44,6 @@
 
 enum {
 	ACTIVE_LIST,
-	FLUSHING_LIST,
 	INACTIVE_LIST,
 	PINNED_LIST,
 };
@@ -178,10 +177,6 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 		seq_printf(m, "Inactive:\n");
 		head = &dev_priv->mm.inactive_list;
 		break;
-	case FLUSHING_LIST:
-		seq_printf(m, "Flushing:\n");
-		head = &dev_priv->mm.flushing_list;
-		break;
 	default:
 		mutex_unlock(&dev->struct_mutex);
 		return -EINVAL;
@@ -239,7 +234,6 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 
 	size = count = mappable_size = mappable_count = 0;
 	count_objects(&dev_priv->mm.active_list, mm_list);
-	count_objects(&dev_priv->mm.flushing_list, mm_list);
 	seq_printf(m, "  %u [%u] active objects, %zu [%zu] bytes\n",
 		   count, mappable_count, size, mappable_size);
 
@@ -2007,7 +2001,6 @@ static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_gem_gtt", i915_gem_gtt_info, 0},
 	{"i915_gem_pinned", i915_gem_gtt_info, 0, (void *) PINNED_LIST},
 	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
-	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
 	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
 	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
 	{"i915_gem_request", i915_gem_request_info, 0},
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 49a532e338e6..6b91755f7743 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -696,17 +696,6 @@ typedef struct drm_i915_private {
 	struct list_head active_list;
 
 	/**
-	 * List of objects which are not in the ringbuffer but which
-	 * still have a write_domain which needs to be flushed before
-	 * unbinding.
-	 *
-	 * last_rendering_seqno is 0 while an object is in this list.
-	 *
-	 * A reference is held on the buffer while on this list.
-	 */
-	struct list_head flushing_list;
-
-	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
@@ -873,7 +862,7 @@ struct drm_i915_gem_object {
 	struct drm_mm_node *gtt_space;
 	struct list_head gtt_list;
 
-	/** This object's place on the active/flushing/inactive lists */
+	/** This object's place on the active/inactive lists */
 	struct list_head ring_list;
 	struct list_head mm_list;
 	/** This object's place on GPU write list */
@@ -882,9 +871,9 @@ struct drm_i915_gem_object {
 	struct list_head exec_list;
 
 	/**
-	 * This is set if the object is on the active or flushing lists
-	 * (has pending rendering), and is not set if it's on inactive (ready
-	 * to be unbound).
+	 * This is set if the object is on the active lists (has pending
+	 * rendering and so a non-zero seqno), and is not set if it is on
+	 * inactive (ready to be unbound) list.
 	 */
 	unsigned int active:1;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6a80d6565ef2..f62dd298a65d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1458,27 +1458,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
 }
 
 static void
-i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
-{
-	list_del_init(&obj->ring_list);
-	obj->last_read_seqno = 0;
-	obj->last_write_seqno = 0;
-	obj->last_fenced_seqno = 0;
-}
-
-static void
-i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-
-	BUG_ON(!obj->active);
-	list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
-
-	i915_gem_object_move_off_active(obj);
-}
-
-static void
 i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 {
 	struct drm_device *dev = obj->base.dev;
@@ -1487,10 +1466,17 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 	list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
 	BUG_ON(!list_empty(&obj->gpu_write_list));
+	BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
 	BUG_ON(!obj->active);
+
+	list_del_init(&obj->ring_list);
 	obj->ring = NULL;
 
-	i915_gem_object_move_off_active(obj);
+	obj->last_read_seqno = 0;
+	obj->last_write_seqno = 0;
+	obj->base.write_domain = 0;
+
+	obj->last_fenced_seqno = 0;
 	obj->fenced_gpu_access = false;
 
 	obj->active = 0;
@@ -1694,7 +1680,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 				       struct drm_i915_gem_object,
 				       ring_list);
 
-		obj->base.write_domain = 0;
 		list_del_init(&obj->gpu_write_list);
 		i915_gem_object_move_to_inactive(obj);
 	}
@@ -1731,20 +1716,6 @@ void i915_gem_reset(struct drm_device *dev)
 	for_each_ring(ring, dev_priv, i)
 		i915_gem_reset_ring_lists(dev_priv, ring);
 
-	/* Remove anything from the flushing lists. The GPU cache is likely
-	 * to be lost on reset along with the data, so simply move the
-	 * lost bo to the inactive list.
-	 */
-	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		obj = list_first_entry(&dev_priv->mm.flushing_list,
-				       struct drm_i915_gem_object,
-				       mm_list);
-
-		obj->base.write_domain = 0;
-		list_del_init(&obj->gpu_write_list);
-		i915_gem_object_move_to_inactive(obj);
-	}
-
 	/* Move everything out of the GPU domains to ensure we do any
 	 * necessary invalidation upon reuse.
 	 */
@@ -1815,10 +1786,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 		if (!i915_seqno_passed(seqno, obj->last_read_seqno))
 			break;
 
-		if (obj->base.write_domain != 0)
-			i915_gem_object_move_to_flushing(obj);
-		else
-			i915_gem_object_move_to_inactive(obj);
+		i915_gem_object_move_to_inactive(obj);
 	}
 
 	if (unlikely(ring->trace_irq_seqno &&
@@ -3897,7 +3865,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
 	}
 
 	BUG_ON(!list_empty(&dev_priv->mm.active_list));
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
 	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
 	mutex_unlock(&dev->struct_mutex);
 
@@ -3955,7 +3922,6 @@ i915_gem_load(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
-	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
@@ -4206,12 +4172,7 @@ static int
 i915_gpu_is_active(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	int lists_empty;
-
-	lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-		      list_empty(&dev_priv->mm.active_list);
-
-	return !lists_empty;
+	return !list_empty(&dev_priv->mm.active_list);
 }
 
 static int
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index eba0308f10e3..51e547c4ed89 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -93,23 +93,6 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 
 	/* Now merge in the soon-to-be-expired objects... */
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		/* Does the object require an outstanding flush? */
-		if (obj->base.write_domain)
-			continue;
-
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
-
-	/* Finally add anything with a pending flush (in order of retirement) */
-	list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
-		if (mark_free(obj, &unwind_list))
-			goto found;
-	}
-	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (!obj->base.write_domain)
-			continue;
-
 		if (mark_free(obj, &unwind_list))
 			goto found;
 	}
@@ -172,7 +155,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 	int ret;
 
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
-		       list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->mm.active_list));
 	if (lists_empty)
 		return -ENOSPC;
@@ -189,8 +171,6 @@ i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
 
 	i915_gem_retire_requests(dev);
 
-	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
-
 	/* Having flushed everything, unbind() should never raise an error */
 	list_for_each_entry_safe(obj, next,
 				 &dev_priv->mm.inactive_list, mm_list) {