author     Chris Wilson <chris@chris-wilson.co.uk>    2012-07-20 07:41:03 -0400
committer  Daniel Vetter <daniel.vetter@ffwll.ch>     2012-07-25 12:23:53 -0400
commit     69c2fc891343cb5217c866d10709343cff190bdc
tree       e656c584f732c1cf1b3fc709ba54ff216565f985
parent     65ce3027415d4dc9ee18ef0a135214b4fb76730b
drm/i915: Remove the per-ring write list
This is now handled by a global flag to ensure we emit a flush before
the next serialisation point (if we failed to queue one previously).
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h             |  2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c             | 53
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c  |  7
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c     |  2
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h     |  9
5 files changed, 3 insertions, 70 deletions
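
The change reads as a swap of bookkeeping strategies: the old code tracked every pending GPU write on a per-ring gpu_write_list and walked that list to flush objects one by one; the new code raises a single per-ring dirty flag when a batch writes, and emits one flush at the next serialisation point. Below is a minimal sketch of the new idiom in plain C. The struct and helper names are invented for illustration (the flag in the actual series is ring->gpu_caches_dirty); this is a sketch, not the driver's real code.

    /*
     * Illustrative sketch only: simplified types, not the i915 structures.
     */
    #include <stdbool.h>
    #include <stdio.h>

    struct ring {
            bool gpu_caches_dirty;  /* set when a batch may have written via the GPU */
    };

    /* New scheme: a GPU write simply marks the whole ring dirty... */
    static void mark_gpu_write(struct ring *ring)
    {
            ring->gpu_caches_dirty = true;
    }

    /* ...and one flush is emitted lazily at the next serialisation point
     * (request emission, ring idle), instead of per tracked object. */
    static int flush_if_dirty(struct ring *ring)
    {
            if (!ring->gpu_caches_dirty)
                    return 0;
            printf("emit flush into ring\n");  /* stands in for MI_FLUSH emission */
            ring->gpu_caches_dirty = false;
            return 0;
    }

    int main(void)
    {
            struct ring ring = { .gpu_caches_dirty = false };

            mark_gpu_write(&ring);  /* a batchbuffer wrote through the GPU */
            flush_if_dirty(&ring);  /* next serialisation point: one flush */
            flush_if_dirty(&ring);  /* already clean: no-op */
            return 0;
    }

Any number of pending writes collapses into one deferred flush, which is why the per-object list walk in the diff below can be deleted outright.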
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 6b91755f7743..59e3199da162 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -865,8 +865,6 @@ struct drm_i915_gem_object {
         /** This object's place on the active/inactive lists */
         struct list_head ring_list;
         struct list_head mm_list;
-        /** This object's place on GPU write list */
-        struct list_head gpu_write_list;
         /** This object's place in the batchbuffer or on the eviction list */
         struct list_head exec_list;
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f62dd298a65d..78fa9503a34d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1465,7 +1465,6 @@ i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
 
         list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
 
-        BUG_ON(!list_empty(&obj->gpu_write_list));
         BUG_ON(obj->base.write_domain & ~I915_GEM_GPU_DOMAINS);
         BUG_ON(!obj->active);
 
@@ -1511,30 +1510,6 @@ i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
         return obj->madv == I915_MADV_DONTNEED;
 }
 
-static void
-i915_gem_process_flushing_list(struct intel_ring_buffer *ring,
-                               uint32_t flush_domains)
-{
-        struct drm_i915_gem_object *obj, *next;
-
-        list_for_each_entry_safe(obj, next,
-                                 &ring->gpu_write_list,
-                                 gpu_write_list) {
-                if (obj->base.write_domain & flush_domains) {
-                        uint32_t old_write_domain = obj->base.write_domain;
-
-                        obj->base.write_domain = 0;
-                        list_del_init(&obj->gpu_write_list);
-                        i915_gem_object_move_to_active(obj, ring,
-                                                       i915_gem_next_request_seqno(ring));
-
-                        trace_i915_gem_object_change_domain(obj,
-                                                            obj->base.read_domains,
-                                                            old_write_domain);
-                }
-        }
-}
-
 static u32
 i915_gem_get_seqno(struct drm_device *dev)
 {
@@ -1637,8 +1612,6 @@ i915_add_request(struct intel_ring_buffer *ring,
                                    &dev_priv->mm.retire_work, HZ);
         }
 
-        WARN_ON(!list_empty(&ring->gpu_write_list));
-
         return 0;
 }
 
@@ -1680,7 +1653,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
                                        struct drm_i915_gem_object,
                                        ring_list);
 
-                list_del_init(&obj->gpu_write_list);
                 i915_gem_object_move_to_inactive(obj);
         }
 }
@@ -2011,11 +1983,6 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
         u32 seqno;
         int ret;
 
-        /* This function only exists to support waiting for existing rendering,
-         * not for emitting required flushes.
-         */
-        BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
-
         /* If there is rendering queued on the buffer being evicted, wait for
          * it.
          */
@@ -2308,26 +2275,14 @@ i915_gem_flush_ring(struct intel_ring_buffer *ring,
         if (ret)
                 return ret;
 
-        if (flush_domains & I915_GEM_GPU_DOMAINS)
-                i915_gem_process_flushing_list(ring, flush_domains);
-
         return 0;
 }
 
 static int i915_ring_idle(struct intel_ring_buffer *ring)
 {
-        int ret;
-
-        if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
+        if (list_empty(&ring->active_list))
                 return 0;
 
-        if (!list_empty(&ring->gpu_write_list)) {
-                ret = i915_gem_flush_ring(ring,
-                                          I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
-                if (ret)
-                        return ret;
-        }
-
         return i915_wait_seqno(ring, i915_gem_next_request_seqno(ring));
 }
 
@@ -2343,10 +2298,6 @@ int i915_gpu_idle(struct drm_device *dev)
                 if (ret)
                         return ret;
 
-                /* Is the device fubar? */
-                if (WARN_ON(!list_empty(&ring->gpu_write_list)))
-                        return -EBUSY;
-
                 ret = i915_switch_context(ring, NULL, DEFAULT_CONTEXT_ID);
                 if (ret)
                         return ret;
@@ -3491,7 +3442,6 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
         INIT_LIST_HEAD(&obj->gtt_list);
         INIT_LIST_HEAD(&obj->ring_list);
         INIT_LIST_HEAD(&obj->exec_list);
-        INIT_LIST_HEAD(&obj->gpu_write_list);
         obj->madv = I915_MADV_WILLNEED;
         /* Avoid an unnecessary call to unbind on the first bind. */
         obj->map_and_fenceable = true;
@@ -3912,7 +3862,6 @@ init_ring_lists(struct intel_ring_buffer *ring)
 {
         INIT_LIST_HEAD(&ring->active_list);
         INIT_LIST_HEAD(&ring->request_list);
-        INIT_LIST_HEAD(&ring->gpu_write_list);
 }
 
 void
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 2353e6ee2f0d..36c940c1a978 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -943,9 +943,8 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
         struct drm_i915_gem_object *obj;
 
         list_for_each_entry(obj, objects, exec_list) {
                 u32 old_read = obj->base.read_domains;
                 u32 old_write = obj->base.write_domain;
-
 
                 obj->base.read_domains = obj->base.pending_read_domains;
                 obj->base.write_domain = obj->base.pending_write_domain;
@@ -955,8 +954,6 @@ i915_gem_execbuffer_move_to_active(struct list_head *objects,
                 if (obj->base.write_domain) {
                         obj->dirty = 1;
                         obj->last_write_seqno = seqno;
-                        list_move_tail(&obj->gpu_write_list,
-                                       &ring->gpu_write_list);
                         if (obj->pin_count) /* check for potential scanout */
                                 intel_mark_busy(ring->dev, obj);
                 }
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index bf0195a96d53..8f221d9a7bdb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1002,7 +1002,6 @@ static int intel_init_ring_buffer(struct drm_device *dev,
         ring->dev = dev;
         INIT_LIST_HEAD(&ring->active_list);
         INIT_LIST_HEAD(&ring->request_list);
-        INIT_LIST_HEAD(&ring->gpu_write_list);
         ring->size = 32 * PAGE_SIZE;
 
         init_waitqueue_head(&ring->irq_queue);
@@ -1473,7 +1472,6 @@ int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
         ring->dev = dev;
         INIT_LIST_HEAD(&ring->active_list);
         INIT_LIST_HEAD(&ring->request_list);
-        INIT_LIST_HEAD(&ring->gpu_write_list);
 
         ring->size = size;
         ring->effective_size = ring->size;
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 1d3c81fdad92..7986f3001cf0 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -101,15 +101,6 @@ struct intel_ring_buffer {
         struct list_head request_list;
 
         /**
-         * List of objects currently pending a GPU write flush.
-         *
-         * All elements on this list will belong to either the
-         * active_list or flushing_list, last_rendering_seqno can
-         * be used to differentiate between the two elements.
-         */
-        struct list_head gpu_write_list;
-
-        /**
          * Do we have some not yet emitted requests outstanding?
          */
         u32 outstanding_lazy_request;
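
For completeness, the consumer side of the flag lives outside this diff: the deferred flush is emitted when the next request is queued (in this series, from i915_add_request() via the intel_ring_flush_all_caches() helper). A hedged sketch of that call site, reusing the illustrative struct ring and flush_if_dirty() from the sketch above rather than the driver's real signatures:

    /* Sketch of a serialisation point; toy code, not the i915 functions. */
    static int add_request(struct ring *ring)
    {
            int ret;

            /* Emit the deferred flush, if any writes are pending, just
             * before the request's breadcrumb goes into the ring. */
            ret = flush_if_dirty(ring);
            if (ret)
                    return ret;

            /* ...then emit the breadcrumb and queue the request as usual. */
            return 0;
    }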