diff options
author | Chris Wilson <chris@chris-wilson.co.uk> | 2012-07-20 07:41:02 -0400 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-07-25 12:23:52 -0400 |
commit | 65ce3027415d4dc9ee18ef0a135214b4fb76730b (patch) | |
tree | 74b94b51e2aa3a124311f410f74d8d13c1161ad8 /drivers/gpu/drm/i915/i915_gem.c | |
parent | 0201f1ecf4b81f08799b1fb9c8cdf1125b9b78a6 (diff) |
drm/i915: Remove the defunct flushing list
As we guarantee to emit a flush before emitting the breadcrumb or
the next batchbuffer, there is no further need for the flushing list.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 59 |
1 file changed, 10 insertions(+), 49 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 6a80d6565ef2..f62dd298a65d 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -1458,27 +1458,6 @@ i915_gem_object_move_to_active(struct drm_i915_gem_object *obj, | |||
1458 | } | 1458 | } |
1459 | 1459 | ||
1460 | static void | 1460 | static void |
1461 | i915_gem_object_move_off_active(struct drm_i915_gem_object *obj) | ||
1462 | { | ||
1463 | list_del_init(&obj->ring_list); | ||
1464 | obj->last_read_seqno = 0; | ||
1465 | obj->last_write_seqno = 0; | ||
1466 | obj->last_fenced_seqno = 0; | ||
1467 | } | ||
1468 | |||
1469 | static void | ||
1470 | i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj) | ||
1471 | { | ||
1472 | struct drm_device *dev = obj->base.dev; | ||
1473 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1474 | |||
1475 | BUG_ON(!obj->active); | ||
1476 | list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list); | ||
1477 | |||
1478 | i915_gem_object_move_off_active(obj); | ||
1479 | } | ||
1480 | |||
1481 | static void | ||
1482 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | 1461 | i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) |
1483 | { | 1462 | { |
1484 | struct drm_device *dev = obj->base.dev; | 1463 | struct drm_device *dev = obj->base.dev; |
@@ -1487,10 +1466,17 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj) | |||
1487 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); | 1466 | list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list); |
1488 | 1467 | ||
1489 | BUG_ON(!list_empty(&obj->gpu_write_list)); | 1468 | BUG_ON(!list_empty(&obj->gpu_write_list)); |
1469 | BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS); | ||
1490 | BUG_ON(!obj->active); | 1470 | BUG_ON(!obj->active); |
1471 | |||
1472 | list_del_init(&obj->ring_list); | ||
1491 | obj->ring = NULL; | 1473 | obj->ring = NULL; |
1492 | 1474 | ||
1493 | i915_gem_object_move_off_active(obj); | 1475 | obj->last_read_seqno = 0; |
1476 | obj->last_write_seqno = 0; | ||
1477 | obj->base.write_domain = 0; | ||
1478 | |||
1479 | obj->last_fenced_seqno = 0; | ||
1494 | obj->fenced_gpu_access = false; | 1480 | obj->fenced_gpu_access = false; |
1495 | 1481 | ||
1496 | obj->active = 0; | 1482 | obj->active = 0; |
@@ -1694,7 +1680,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv, | |||
1694 | struct drm_i915_gem_object, | 1680 | struct drm_i915_gem_object, |
1695 | ring_list); | 1681 | ring_list); |
1696 | 1682 | ||
1697 | obj->base.write_domain = 0; | ||
1698 | list_del_init(&obj->gpu_write_list); | 1683 | list_del_init(&obj->gpu_write_list); |
1699 | i915_gem_object_move_to_inactive(obj); | 1684 | i915_gem_object_move_to_inactive(obj); |
1700 | } | 1685 | } |
@@ -1731,20 +1716,6 @@ void i915_gem_reset(struct drm_device *dev) | |||
1731 | for_each_ring(ring, dev_priv, i) | 1716 | for_each_ring(ring, dev_priv, i) |
1732 | i915_gem_reset_ring_lists(dev_priv, ring); | 1717 | i915_gem_reset_ring_lists(dev_priv, ring); |
1733 | 1718 | ||
1734 | /* Remove anything from the flushing lists. The GPU cache is likely | ||
1735 | * to be lost on reset along with the data, so simply move the | ||
1736 | * lost bo to the inactive list. | ||
1737 | */ | ||
1738 | while (!list_empty(&dev_priv->mm.flushing_list)) { | ||
1739 | obj = list_first_entry(&dev_priv->mm.flushing_list, | ||
1740 | struct drm_i915_gem_object, | ||
1741 | mm_list); | ||
1742 | |||
1743 | obj->base.write_domain = 0; | ||
1744 | list_del_init(&obj->gpu_write_list); | ||
1745 | i915_gem_object_move_to_inactive(obj); | ||
1746 | } | ||
1747 | |||
1748 | /* Move everything out of the GPU domains to ensure we do any | 1719 | /* Move everything out of the GPU domains to ensure we do any |
1749 | * necessary invalidation upon reuse. | 1720 | * necessary invalidation upon reuse. |
1750 | */ | 1721 | */ |
@@ -1815,10 +1786,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) | |||
1815 | if (!i915_seqno_passed(seqno, obj->last_read_seqno)) | 1786 | if (!i915_seqno_passed(seqno, obj->last_read_seqno)) |
1816 | break; | 1787 | break; |
1817 | 1788 | ||
1818 | if (obj->base.write_domain != 0) | 1789 | i915_gem_object_move_to_inactive(obj); |
1819 | i915_gem_object_move_to_flushing(obj); | ||
1820 | else | ||
1821 | i915_gem_object_move_to_inactive(obj); | ||
1822 | } | 1790 | } |
1823 | 1791 | ||
1824 | if (unlikely(ring->trace_irq_seqno && | 1792 | if (unlikely(ring->trace_irq_seqno && |
@@ -3897,7 +3865,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | |||
3897 | } | 3865 | } |
3898 | 3866 | ||
3899 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); | 3867 | BUG_ON(!list_empty(&dev_priv->mm.active_list)); |
3900 | BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
3901 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | 3868 | BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); |
3902 | mutex_unlock(&dev->struct_mutex); | 3869 | mutex_unlock(&dev->struct_mutex); |
3903 | 3870 | ||
@@ -3955,7 +3922,6 @@ i915_gem_load(struct drm_device *dev) | |||
3955 | drm_i915_private_t *dev_priv = dev->dev_private; | 3922 | drm_i915_private_t *dev_priv = dev->dev_private; |
3956 | 3923 | ||
3957 | INIT_LIST_HEAD(&dev_priv->mm.active_list); | 3924 | INIT_LIST_HEAD(&dev_priv->mm.active_list); |
3958 | INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | ||
3959 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | 3925 | INIT_LIST_HEAD(&dev_priv->mm.inactive_list); |
3960 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); | 3926 | INIT_LIST_HEAD(&dev_priv->mm.fence_list); |
3961 | INIT_LIST_HEAD(&dev_priv->mm.gtt_list); | 3927 | INIT_LIST_HEAD(&dev_priv->mm.gtt_list); |
@@ -4206,12 +4172,7 @@ static int | |||
4206 | i915_gpu_is_active(struct drm_device *dev) | 4172 | i915_gpu_is_active(struct drm_device *dev) |
4207 | { | 4173 | { |
4208 | drm_i915_private_t *dev_priv = dev->dev_private; | 4174 | drm_i915_private_t *dev_priv = dev->dev_private; |
4209 | int lists_empty; | 4175 | return !list_empty(&dev_priv->mm.active_list); |
4210 | |||
4211 | lists_empty = list_empty(&dev_priv->mm.flushing_list) && | ||
4212 | list_empty(&dev_priv->mm.active_list); | ||
4213 | |||
4214 | return !lists_empty; | ||
4215 | } | 4176 | } |
4216 | 4177 | ||
4217 | static int | 4178 | static int |