aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/i915_gem.c
diff options
context:
space:
mode:
authorBen Widawsky <ben@bwidawsk.net>2013-12-06 17:11:22 -0500
committerDaniel Vetter <daniel.vetter@ffwll.ch>2013-12-18 09:52:51 -0500
commite20780439b26ba95aeb29d3e27cd8cc32bc82a4c (patch)
tree3aec99c77a82a0588f586fd6949994e87148944a /drivers/gpu/drm/i915/i915_gem.c
parent41bde5535a7d48876095926bb55b1aed5ccd6b2c (diff)
drm/i915: Defer request freeing
With context destruction, we always want to be able to tear down the underlying address space. This is invoked on the last unreference to the context, which could happen before we've moved all objects to the inactive list. To enable a clean tear down of the address space, make sure to process the request free last. Without this change, we cannot guarantee that we don't still have active objects in the VM. As an example of a failing case: CTX-A is created, count=1 CTX-A is used during execbuf does a context switch count = 2 and add_request count = 3 CTX B runs, switches, CTX-A count = 2 CTX-A is destroyed, count = 1 retire requests is called free_request from CTX-A, count = 0 <--- free context with active object As mentioned above, by freeing the request after processing the active list, we can avoid this case. Signed-off-by: Ben Widawsky <ben@bwidawsk.net> Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem.c')
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c13
1 file changed, 10 insertions, 3 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 89e2f92e8335..99c05e3cf419 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2423,6 +2423,8 @@ void i915_gem_reset(struct drm_device *dev)
2423void 2423void
2424i915_gem_retire_requests_ring(struct intel_ring_buffer *ring) 2424i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2425{ 2425{
2426 LIST_HEAD(deferred_request_free);
2427 struct drm_i915_gem_request *request;
2426 uint32_t seqno; 2428 uint32_t seqno;
2427 2429
2428 if (list_empty(&ring->request_list)) 2430 if (list_empty(&ring->request_list))
@@ -2433,8 +2435,6 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2433 seqno = ring->get_seqno(ring, true); 2435 seqno = ring->get_seqno(ring, true);
2434 2436
2435 while (!list_empty(&ring->request_list)) { 2437 while (!list_empty(&ring->request_list)) {
2436 struct drm_i915_gem_request *request;
2437
2438 request = list_first_entry(&ring->request_list, 2438 request = list_first_entry(&ring->request_list,
2439 struct drm_i915_gem_request, 2439 struct drm_i915_gem_request,
2440 list); 2440 list);
@@ -2450,7 +2450,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2450 */ 2450 */
2451 ring->last_retired_head = request->tail; 2451 ring->last_retired_head = request->tail;
2452 2452
2453 i915_gem_free_request(request); 2453 list_move_tail(&request->list, &deferred_request_free);
2454 } 2454 }
2455 2455
2456 /* Move any buffers on the active list that are no longer referenced 2456 /* Move any buffers on the active list that are no longer referenced
@@ -2475,6 +2475,13 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
2475 ring->trace_irq_seqno = 0; 2475 ring->trace_irq_seqno = 0;
2476 } 2476 }
2477 2477
2478 /* Finish processing active list before freeing request */
2479 while (!list_empty(&deferred_request_free)) {
2480 request = list_first_entry(&deferred_request_free,
2481 struct drm_i915_gem_request,
2482 list);
2483 i915_gem_free_request(request);
2484 }
2478 WARN_ON(i915_verify_lists(ring->dev)); 2485 WARN_ON(i915_verify_lists(ring->dev));
2479} 2486}
2480 2487