aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
authorOscar Mateo <oscar.mateo@intel.com>2014-07-24 12:04:26 -0400
committerDaniel Vetter <daniel.vetter@ffwll.ch>2014-08-11 16:42:36 -0400
commit82e104cc266c6da30a30fc5028b2f0236c669cd7 (patch)
tree6d3726f8eac503e3bb7f66739eca1e468da56015 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent26fbb77445bd402417f42936f68c0da26d33855d (diff)
drm/i915/bdw: New logical ring submission mechanism
Well, new-ish: if all this code looks familiar, that's because it's a clone of the existing submission mechanism (with some modifications here and there to adapt it to LRCs and Execlists). And why did we do this instead of reusing code, one might wonder? Well, there are some fears that the differences are big enough that they will end up breaking all platforms. Also, Execlists offer several advantages, like control over when the GPU is done with a given workload, that can help simplify the submission mechanism, no doubt. I am interested in getting Execlists to work first and foremost, but in the future this parallel submission mechanism will help us to fine tune the mechanism without affecting old gens. v2: Pass the ringbuffer only (whenever possible). Signed-off-by: Oscar Mateo <oscar.mateo@intel.com> Reviewed-by: Damien Lespiau <damien.lespiau@intel.com> [danvet: Appease checkpatch. Again. And drop the legacy sarea gunk that somehow crept in.] Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c22
1 file changed, 12 insertions, 10 deletions
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index dab5e7c79036..0bfa018fab20 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -57,7 +57,7 @@ intel_ring_initialized(struct intel_engine_cs *ring)
57 return ring->buffer && ring->buffer->obj; 57 return ring->buffer && ring->buffer->obj;
58} 58}
59 59
60static inline int __ring_space(int head, int tail, int size) 60int __intel_ring_space(int head, int tail, int size)
61{ 61{
62 int space = head - (tail + I915_RING_FREE_SPACE); 62 int space = head - (tail + I915_RING_FREE_SPACE);
63 if (space < 0) 63 if (space < 0)
@@ -65,12 +65,13 @@ static inline int __ring_space(int head, int tail, int size)
65 return space; 65 return space;
66} 66}
67 67
68static inline int ring_space(struct intel_ringbuffer *ringbuf) 68int intel_ring_space(struct intel_ringbuffer *ringbuf)
69{ 69{
70 return __ring_space(ringbuf->head & HEAD_ADDR, ringbuf->tail, ringbuf->size); 70 return __intel_ring_space(ringbuf->head & HEAD_ADDR,
71 ringbuf->tail, ringbuf->size);
71} 72}
72 73
73static bool intel_ring_stopped(struct intel_engine_cs *ring) 74bool intel_ring_stopped(struct intel_engine_cs *ring)
74{ 75{
75 struct drm_i915_private *dev_priv = ring->dev->dev_private; 76 struct drm_i915_private *dev_priv = ring->dev->dev_private;
76 return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring); 77 return dev_priv->gpu_error.stop_rings & intel_ring_flag(ring);
@@ -585,7 +586,7 @@ static int init_ring_common(struct intel_engine_cs *ring)
585 else { 586 else {
586 ringbuf->head = I915_READ_HEAD(ring); 587 ringbuf->head = I915_READ_HEAD(ring);
587 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 588 ringbuf->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
588 ringbuf->space = ring_space(ringbuf); 589 ringbuf->space = intel_ring_space(ringbuf);
589 ringbuf->last_retired_head = -1; 590 ringbuf->last_retired_head = -1;
590 } 591 }
591 592
@@ -1702,13 +1703,14 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1702 ringbuf->head = ringbuf->last_retired_head; 1703 ringbuf->head = ringbuf->last_retired_head;
1703 ringbuf->last_retired_head = -1; 1704 ringbuf->last_retired_head = -1;
1704 1705
1705 ringbuf->space = ring_space(ringbuf); 1706 ringbuf->space = intel_ring_space(ringbuf);
1706 if (ringbuf->space >= n) 1707 if (ringbuf->space >= n)
1707 return 0; 1708 return 0;
1708 } 1709 }
1709 1710
1710 list_for_each_entry(request, &ring->request_list, list) { 1711 list_for_each_entry(request, &ring->request_list, list) {
1711 if (__ring_space(request->tail, ringbuf->tail, ringbuf->size) >= n) { 1712 if (__intel_ring_space(request->tail, ringbuf->tail,
1713 ringbuf->size) >= n) {
1712 seqno = request->seqno; 1714 seqno = request->seqno;
1713 break; 1715 break;
1714 } 1716 }
@@ -1725,7 +1727,7 @@ static int intel_ring_wait_request(struct intel_engine_cs *ring, int n)
1725 ringbuf->head = ringbuf->last_retired_head; 1727 ringbuf->head = ringbuf->last_retired_head;
1726 ringbuf->last_retired_head = -1; 1728 ringbuf->last_retired_head = -1;
1727 1729
1728 ringbuf->space = ring_space(ringbuf); 1730 ringbuf->space = intel_ring_space(ringbuf);
1729 return 0; 1731 return 0;
1730} 1732}
1731 1733
@@ -1754,7 +1756,7 @@ static int ring_wait_for_space(struct intel_engine_cs *ring, int n)
1754 trace_i915_ring_wait_begin(ring); 1756 trace_i915_ring_wait_begin(ring);
1755 do { 1757 do {
1756 ringbuf->head = I915_READ_HEAD(ring); 1758 ringbuf->head = I915_READ_HEAD(ring);
1757 ringbuf->space = ring_space(ringbuf); 1759 ringbuf->space = intel_ring_space(ringbuf);
1758 if (ringbuf->space >= n) { 1760 if (ringbuf->space >= n) {
1759 ret = 0; 1761 ret = 0;
1760 break; 1762 break;
@@ -1806,7 +1808,7 @@ static int intel_wrap_ring_buffer(struct intel_engine_cs *ring)
1806 iowrite32(MI_NOOP, virt++); 1808 iowrite32(MI_NOOP, virt++);
1807 1809
1808 ringbuf->tail = 0; 1810 ringbuf->tail = 0;
1809 ringbuf->space = ring_space(ringbuf); 1811 ringbuf->space = intel_ring_space(ringbuf);
1810 1812
1811 return 0; 1813 return 0;
1812} 1814}