author    John Harrison <John.C.Harrison@Intel.com>  2015-05-29 12:44:00 -0400
committer Daniel Vetter <daniel.vetter@ffwll.ch>     2015-06-23 08:02:24 -0400
commit    ee044a8863de58044cb370c23f97b9b68b33e47b (patch)
tree      5aff8c64f460123456aed365432db911cb0025ea
parent    7deb4d3980ea44ebb4097426f85d5f6c89b873a4 (diff)
drm/i915: Update ring->add_request() to take a request structure
Updated the various ring->add_request() implementations to take a request
instead of a ring. This removes their reliance on the OLR to obtain the
seqno value that the request should be tagged with.

For: VIZ-5115
Signed-off-by: John Harrison <John.C.Harrison@Intel.com>
Reviewed-by: Tomas Elf <tomas.elf@intel.com>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
 drivers/gpu/drm/i915/i915_gem.c         |  2
 drivers/gpu/drm/i915/intel_ringbuffer.c | 26
 drivers/gpu/drm/i915/intel_ringbuffer.h |  2
 3 files changed, 14 insertions(+), 16 deletions(-)
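Before the diff proper, the shape of the change can be summed up with a short sketch. The code below is illustrative only, not the driver source: example_request, example_engine and the seqno field are simplified stand-ins (assumptions made for this sketch) for drm_i915_gem_request, intel_engine_cs and i915_gem_request_get_seqno(). It shows how each ->add_request() hook moves from digging the seqno out of the ring's outstanding lazy request (OLR) to reading it directly from the request it is now handed.

/* Illustrative sketch only; simplified stand-in types, not the i915 code. */
struct example_request {
	unsigned int seqno;              /* seqno the request is tagged with */
	struct example_engine *ring;     /* engine the request executes on */
};

struct example_engine {
	struct example_request *outstanding_lazy_request;  /* the OLR */
};

/* Before: the hook received the engine and had to reach through the OLR. */
static int add_request_old(struct example_engine *ring)
{
	unsigned int seqno = ring->outstanding_lazy_request->seqno;

	/* ... emit MI_STORE_DWORD_INDEX, seqno, MI_USER_INTERRUPT ... */
	(void)seqno;
	return 0;
}

/* After: the hook receives the request; the engine and seqno come from it. */
static int add_request_new(struct example_request *req)
{
	struct example_engine *ring = req->ring;
	unsigned int seqno = req->seqno;

	/* ... same emission sequence, tagged with the request's own seqno ... */
	(void)ring;
	(void)seqno;
	return 0;
}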
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 25fe1ef32eaa..6d511d32f72a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2524,7 +2524,7 @@ void __i915_add_request(struct drm_i915_gem_request *request,
 	if (i915.enable_execlists)
 		ret = ring->emit_request(ringbuf, request);
 	else {
-		ret = ring->add_request(ring);
+		ret = ring->add_request(request);
 
 		request->tail = intel_ring_get_tail(ringbuf);
 	}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index e0aa008f0555..28d7801a8fa5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1288,16 +1288,16 @@ static int gen6_signal(struct intel_engine_cs *signaller,
 
 /**
  * gen6_add_request - Update the semaphore mailbox registers
  *
- * @ring - ring that is adding a request
- * @seqno - return seqno stuck into the ring
+ * @request - request to write to the ring
  *
  * Update the mailbox registers in the *other* rings with the current seqno.
  * This acts like a signal in the canonical semaphore.
  */
 static int
-gen6_add_request(struct intel_engine_cs *ring)
+gen6_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	if (ring->semaphore.signal)
@@ -1310,8 +1310,7 @@ gen6_add_request(struct intel_engine_cs *ring)
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	__intel_ring_advance(ring);
 
@@ -1408,8 +1407,9 @@ do { \
 } while (0)
 
 static int
-pc_render_add_request(struct intel_engine_cs *ring)
+pc_render_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	u32 scratch_addr = ring->scratch.gtt_offset + 2 * CACHELINE_BYTES;
 	int ret;
 
@@ -1429,8 +1429,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
 			PIPE_CONTROL_WRITE_FLUSH |
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 2 * CACHELINE_BYTES; /* write to separate cachelines */
@@ -1449,8 +1448,7 @@ pc_render_add_request(struct intel_engine_cs *ring)
 			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, 0);
 	__intel_ring_advance(ring);
 
@@ -1619,8 +1617,9 @@ bsd_ring_flush(struct drm_i915_gem_request *req,
 }
 
 static int
-i9xx_add_request(struct intel_engine_cs *ring)
+i9xx_add_request(struct drm_i915_gem_request *req)
 {
+	struct intel_engine_cs *ring = req->ring;
 	int ret;
 
 	ret = intel_ring_begin(ring, 4);
@@ -1629,8 +1628,7 @@ i9xx_add_request(struct intel_engine_cs *ring)
 
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring,
-		    i915_gem_request_get_seqno(ring->outstanding_lazy_request));
+	intel_ring_emit(ring, i915_gem_request_get_seqno(req));
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	__intel_ring_advance(ring);
 
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 8c713f625755..cb6d3d0b2530 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -183,7 +183,7 @@ struct intel_engine_cs {
 	int __must_check (*flush)(struct drm_i915_gem_request *req,
 				  u32 invalidate_domains,
 				  u32 flush_domains);
-	int		(*add_request)(struct intel_engine_cs *ring);
+	int		(*add_request)(struct drm_i915_gem_request *req);
 	/* Some chipsets are not quite as coherent as advertised and need
 	 * an expensive kick to force a true read of the up-to-date seqno.
 	 * However, the up-to-date seqno is not always required and the last