author		Chris Wilson <chris@chris-wilson.co.uk>	2016-08-02 17:50:25 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2016-08-02 17:58:19 -0400
commit		7c9cf4e33a72c36a62471709d85d096eaac86dc6 (patch)
tree		355a158bbb6db7d4fe3867bbe3806945a494cbda /drivers/gpu/drm/i915/intel_lrc.c
parent		c7fe7d25ed6036ff16b1c112463baff21c3b205d (diff)
drm/i915: Reduce engine->emit_flush() to a single mode parameter
Rather than passing a complete set of GPU cache domains for invalidation,
for flushing, or both, just pass a single mode parameter to
engine->emit_flush() to determine the required operations:
engine->emit_flush(GPU, 0)   -> engine->emit_flush(EMIT_INVALIDATE)
engine->emit_flush(0, GPU)   -> engine->emit_flush(EMIT_FLUSH)
engine->emit_flush(GPU, GPU) -> engine->emit_flush(EMIT_FLUSH | EMIT_INVALIDATE)
This allows us to extend the behaviour easily in future, for example if
we want just a command barrier without the overhead of flushing.
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Dave Gordon <david.s.gordon@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/1470174640-18242-8-git-send-email-chris@chris-wilson.co.uk
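For reference, the new mode bits are defined next to the emit_flush() vfunc
in intel_ringbuffer.h, which falls outside this intel_lrc.c-limited view.
A minimal sketch of those definitions, assuming the kernel's BIT() helper:

    #include <linux/bits.h>	/* BIT() */

    /* emit_flush() mode bits (sketch; the actual definitions live in
     * intel_ringbuffer.h and are not part of the file-limited diff below).
     */
    #define EMIT_INVALIDATE	BIT(0)
    #define EMIT_FLUSH		BIT(1)
    #define EMIT_BARRIER	(EMIT_INVALIDATE | EMIT_FLUSH)

EMIT_BARRIER is what the workaround emission below uses in place of the old
(GPU, GPU) pair: it requests both an invalidate and a flush in a single call.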
Diffstat (limited to 'drivers/gpu/drm/i915/intel_lrc.c')

-rw-r--r--	drivers/gpu/drm/i915/intel_lrc.c	23
1 file changed, 8 insertions(+), 15 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index e8d971e81491..af7d7e07748e 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -672,7 +672,7 @@ static int execlists_move_to_gpu(struct drm_i915_gem_request *req,
 	/* Unconditionally invalidate gpu caches and ensure that we do flush
 	 * any residual writes from the previous batch.
 	 */
-	return req->engine->emit_flush(req, I915_GEM_GPU_DOMAINS, 0);
+	return req->engine->emit_flush(req, EMIT_INVALIDATE);
 }
 
 int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request)
@@ -998,9 +998,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 	if (w->count == 0)
 		return 0;
 
-	ret = req->engine->emit_flush(req,
-				      I915_GEM_GPU_DOMAINS,
-				      I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -1017,9 +1015,7 @@ static int intel_logical_ring_workarounds_emit(struct drm_i915_gem_request *req)
 
 	intel_ring_advance(ring);
 
-	ret = req->engine->emit_flush(req,
-				      I915_GEM_GPU_DOMAINS,
-				      I915_GEM_GPU_DOMAINS);
+	ret = req->engine->emit_flush(req, EMIT_BARRIER);
 	if (ret)
 		return ret;
 
@@ -1598,9 +1594,7 @@ static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
 	I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
 }
 
-static int gen8_emit_flush(struct drm_i915_gem_request *request,
-			   u32 invalidate_domains,
-			   u32 unused)
+static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
 {
 	struct intel_ring *ring = request->ring;
 	u32 cmd;
@@ -1619,7 +1613,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 	 */
 	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
 
-	if (invalidate_domains & I915_GEM_GPU_DOMAINS) {
+	if (mode & EMIT_INVALIDATE) {
 		cmd |= MI_INVALIDATE_TLB;
 		if (request->engine->id == VCS)
 			cmd |= MI_INVALIDATE_BSD;
@@ -1637,8 +1631,7 @@ static int gen8_emit_flush(struct drm_i915_gem_request *request,
 }
 
 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
-				  u32 invalidate_domains,
-				  u32 flush_domains)
+				  u32 mode)
 {
 	struct intel_ring *ring = request->ring;
 	struct intel_engine_cs *engine = request->engine;
@@ -1650,14 +1643,14 @@ static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
 
 	flags |= PIPE_CONTROL_CS_STALL;
 
-	if (flush_domains) {
+	if (mode & EMIT_FLUSH) {
 		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
 		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
 		flags |= PIPE_CONTROL_FLUSH_ENABLE;
 	}
 
-	if (invalidate_domains) {
+	if (mode & EMIT_INVALIDATE) {
 		flags |= PIPE_CONTROL_TLB_INVALIDATE;
 		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
 		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;