 drivers/gpu/drm/i915/i915_drv.h         |  9 ---------
 drivers/gpu/drm/i915/i915_gem.c         | 36 +++++++++++++++-----------------
 drivers/gpu/drm/i915/intel_ringbuffer.c |  1 +
 drivers/gpu/drm/i915/intel_ringbuffer.h |  9 +++++++++
 4 files changed, 29 insertions(+), 26 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index cc9cb0dda6f..2c2c19b6285 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -571,15 +571,6 @@ typedef struct drm_i915_private {
 	struct list_head flushing_list;
 
 	/**
-	 * List of objects currently pending a GPU write flush.
-	 *
-	 * All elements on this list will belong to either the
-	 * active_list or flushing_list, last_rendering_seqno can
-	 * be used to differentiate between the two elements.
-	 */
-	struct list_head gpu_write_list;
-
-	/**
 	 * LRU list of objects which are not in the ringbuffer and
 	 * are ready to unbind, but are still in the GTT.
 	 *
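Note: the list head and its comment move verbatim into struct intel_ring_buffer (see the intel_ringbuffer.h hunk at the end of this patch); only the owner changes. The object side is untouched: each buffer object already embeds the link node through which it is chained onto whichever gpu_write_list it is pending on. A minimal sketch of that relationship (surrounding fields omitted):

    /* i915_drv.h, unchanged by this patch: the per-object link node */
    struct drm_i915_gem_object {
            struct list_head gpu_write_list; /* chains onto one ring's list */
            /* other members omitted */
    };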
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index e7f27a5b89d..6c2618d884e 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1657,12 +1657,11 @@ i915_gem_process_flushing_list(struct drm_device *dev,
 	struct drm_i915_gem_object *obj_priv, *next;
 
 	list_for_each_entry_safe(obj_priv, next,
-				 &dev_priv->mm.gpu_write_list,
+				 &ring->gpu_write_list,
 				 gpu_write_list) {
 		struct drm_gem_object *obj = &obj_priv->base;
 
-		if (obj->write_domain & flush_domains &&
-		    obj_priv->ring == ring) {
+		if (obj->write_domain & flush_domains) {
 			uint32_t old_write_domain = obj->write_domain;
 
 			obj->write_domain = 0;
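Note: iteration now walks the ring's own list rather than the device-global one, which is why the old "obj_priv->ring == ring" filter can drop out: everything on &ring->gpu_write_list belongs to this ring by construction. The _safe variant matters because the loop body unlinks the flushed object. For reference, a close approximation of the list.h macro of this era (the look-ahead pointer n lets pos be removed mid-walk):

    #define list_for_each_entry_safe(pos, n, head, member)                  \
            for (pos = list_entry((head)->next, typeof(*pos), member),      \
                 n = list_entry(pos->member.next, typeof(*pos), member);    \
                 &pos->member != (head);                                    \
                 pos = n, n = list_entry(n->member.next, typeof(*n), member))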
@@ -2173,6 +2172,9 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
 			  struct intel_ring_buffer *ring)
 {
+	if (list_empty(&ring->gpu_write_list))
+		return 0;
+
 	i915_gem_flush_ring(dev, NULL, ring,
 			    I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
 	return i915_wait_request(dev,
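Note: the early return spares an idle ring a needless flush plus a wait on a freshly allocated request. The test is O(1) because an empty list head simply points at itself, as list.h defines it:

    static inline int list_empty(const struct list_head *head)
    {
            return head->next == head;
    }

The INIT_LIST_HEAD calls added elsewhere in this patch are what guarantee a never-used ring's gpu_write_list is in that self-pointing state.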
@@ -3786,14 +3788,8 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
 		uint32_t old_write_domain = obj->write_domain;
-
 		obj->write_domain = obj->pending_write_domain;
-		if (obj->write_domain)
-			list_move_tail(&obj_priv->gpu_write_list,
-				       &dev_priv->mm.gpu_write_list);
-
 		trace_i915_gem_object_change_domain(obj,
 						    obj->read_domains,
 						    old_write_domain);
@@ -3858,9 +3854,11 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 
 	for (i = 0; i < args->buffer_count; i++) {
 		struct drm_gem_object *obj = object_list[i];
-		obj_priv = to_intel_bo(obj);
 
 		i915_gem_object_move_to_active(obj, ring);
+		if (obj->write_domain)
+			list_move_tail(&to_intel_bo(obj)->gpu_write_list,
+				       &ring->gpu_write_list);
 	}
 
 	i915_add_request(dev, file, request, ring);
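Note: together with the previous hunk, the bookkeeping moves from the domain-fixup loop into the move-to-active loop, so an object is queued on the gpu_write_list of the ring it is actually submitted on. list_move_tail is safe whether the node was still on another list or self-linked, since it unlinks before appending; approximately:

    static inline void list_move_tail(struct list_head *list,
                                      struct list_head *head)
    {
            __list_del(list->prev, list->next); /* harmless on a
                                                   self-linked node */
            list_add_tail(list, head);
    }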
@@ -4618,6 +4616,14 @@ i915_gem_lastclose(struct drm_device *dev)
 		DRM_ERROR("failed to idle hardware: %d\n", ret);
 }
 
+static void
+init_ring_lists(struct intel_ring_buffer *ring)
+{
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
+}
+
 void
 i915_gem_load(struct drm_device *dev)
 {
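Note: init_ring_lists collapses the per-ring INIT_LIST_HEAD triples removed in the next hunk. INIT_LIST_HEAD just makes the head self-referential, which is exactly the empty state the new list_empty check in i915_ring_idle relies on:

    static inline void INIT_LIST_HEAD(struct list_head *list)
    {
            list->next = list;
            list->prev = list;
    }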
@@ -4626,17 +4632,13 @@ i915_gem_load(struct drm_device *dev)
 
 	INIT_LIST_HEAD(&dev_priv->mm.active_list);
 	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
-	INIT_LIST_HEAD(&dev_priv->mm.gpu_write_list);
 	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
 	INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
 	INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 	INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
-	INIT_LIST_HEAD(&dev_priv->render_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->render_ring.request_list);
-	INIT_LIST_HEAD(&dev_priv->bsd_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->bsd_ring.request_list);
-	INIT_LIST_HEAD(&dev_priv->blt_ring.active_list);
-	INIT_LIST_HEAD(&dev_priv->blt_ring.request_list);
+	init_ring_lists(&dev_priv->render_ring);
+	init_ring_lists(&dev_priv->bsd_ring);
+	init_ring_lists(&dev_priv->blt_ring);
 	for (i = 0; i < 16; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 4b53ca81ea4..09f2dc353ae 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -580,6 +580,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	ring->dev = dev;
 	INIT_LIST_HEAD(&ring->active_list);
 	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(dev, ring);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 6ab40c6058f..a05aff0e576 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -83,6 +83,15 @@ struct intel_ring_buffer {
 	struct list_head request_list;
 
 	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
+	/**
 	 * Do we have some not yet emitted requests outstanding?
 	 */
 	bool outstanding_lazy_request;
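Note: the comment that travels with the field documents an invariant: objects on this list still have a pending GPU write and also sit on the active or flushing list. A hypothetical debug helper, not part of this patch, that would assert the pending-write half of that invariant:

    /* hypothetical sanity check, not in the patch: everything on a
     * ring's gpu_write_list should still have a GPU write pending */
    static void check_gpu_write_list(struct intel_ring_buffer *ring)
    {
            struct drm_i915_gem_object *obj_priv;

            list_for_each_entry(obj_priv, &ring->gpu_write_list,
                                gpu_write_list)
                    BUG_ON(obj_priv->base.write_domain == 0);
    }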