aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2014-01-27 17:43:07 -0500
committerDaniel Vetter <daniel.vetter@ffwll.ch>2014-02-06 11:43:13 -0500
commit1f70999f9052f5a1b0ce1a55aff3808f2ec9fe42 (patch)
tree180e47976121d75fb9a919b39cea60bd444cf377 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent011cf577b2531dfbd2254bd9ec147ad71471abaf (diff)
drm/i915: Prevent recursion by retiring requests when the ring is full
As the VM do not track activity of objects and instead use a large hammer to forcibly idle and evict all of their associated objects when one is released, it is possible for that to cause a recursion when we need to wait for free space on a ring and call retire requests. (intel_ring_begin -> intel_ring_wait_request -> i915_gem_retire_requests_ring -> i915_gem_context_free -> i915_gem_evict_vm -> i915_gpu_idle -> intel_ring_begin etc) In order to remove the requirement for calling retire-requests from intel_ring_wait_request, we have to inline a couple of steps from retiring requests, notably we have to record the position of the request we wait for and use that to update the available ring space. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Ville Syrjälä <ville.syrjala@linux.intel.com> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c25
1 file changed, 5 insertions(+), 20 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index d897a19f887f..ba686d75ff32 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1430,28 +1430,16 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
1430 cleanup_status_page(ring); 1430 cleanup_status_page(ring);
1431} 1431}
1432 1432
1433static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
1434{
1435 int ret;
1436
1437 ret = i915_wait_seqno(ring, seqno);
1438 if (!ret)
1439 i915_gem_retire_requests_ring(ring);
1440
1441 return ret;
1442}
1443
1444static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n) 1433static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1445{ 1434{
1446 struct drm_i915_gem_request *request; 1435 struct drm_i915_gem_request *request;
1447 u32 seqno = 0; 1436 u32 seqno = 0, tail;
1448 int ret; 1437 int ret;
1449 1438
1450 i915_gem_retire_requests_ring(ring);
1451
1452 if (ring->last_retired_head != -1) { 1439 if (ring->last_retired_head != -1) {
1453 ring->head = ring->last_retired_head; 1440 ring->head = ring->last_retired_head;
1454 ring->last_retired_head = -1; 1441 ring->last_retired_head = -1;
1442
1455 ring->space = ring_space(ring); 1443 ring->space = ring_space(ring);
1456 if (ring->space >= n) 1444 if (ring->space >= n)
1457 return 0; 1445 return 0;
@@ -1468,6 +1456,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1468 space += ring->size; 1456 space += ring->size;
1469 if (space >= n) { 1457 if (space >= n) {
1470 seqno = request->seqno; 1458 seqno = request->seqno;
1459 tail = request->tail;
1471 break; 1460 break;
1472 } 1461 }
1473 1462
@@ -1482,15 +1471,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
1482 if (seqno == 0) 1471 if (seqno == 0)
1483 return -ENOSPC; 1472 return -ENOSPC;
1484 1473
1485 ret = intel_ring_wait_seqno(ring, seqno); 1474 ret = i915_wait_seqno(ring, seqno);
1486 if (ret) 1475 if (ret)
1487 return ret; 1476 return ret;
1488 1477
1489 if (WARN_ON(ring->last_retired_head == -1)) 1478 ring->head = tail;
1490 return -ENOSPC;
1491
1492 ring->head = ring->last_retired_head;
1493 ring->last_retired_head = -1;
1494 ring->space = ring_space(ring); 1479 ring->space = ring_space(ring);
1495 if (WARN_ON(ring->space < n)) 1480 if (WARN_ON(ring->space < n))
1496 return -ENOSPC; 1481 return -ENOSPC;