aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/i915_gem_request.c
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2016-11-14 15:41:02 -0500
committerChris Wilson <chris@chris-wilson.co.uk>2016-11-14 16:00:28 -0500
commit52e542090701ab983a695cc33ecba19e6a0335a2 (patch)
tree5202f4349727a75b1a70868c18d916dea5c6eaaf /drivers/gpu/drm/i915/i915_gem_request.c
parent0de9136dbbc9f6882bb375270eaddf1b999081bf (diff)
drm/i915/scheduler: Record all dependencies upon request construction
The scheduler needs to know the dependencies of each request for the lifetime of the request, as it may choose to reschedule the requests at any time and must ensure the dependency tree is not broken. This is in addition to using the fence to only allow execution after all dependencies have been completed. One option was to extend the fence to support the bidirectional dependency tracking required by the scheduler. However, the mismatch in lifetimes between the submit fence and the request essentially meant that we had to build a completely separate struct (and we could not simply reuse the existing waitqueue in the fence for one half of the dependency tracking). The extra dependency tracking simply did not mesh well with the fence, and keeping it separate both keeps the fence implementation simpler and allows us to extend the dependency tracking into a priority tree (whilst maintaining support for reordering the tree). To avoid the additional allocations and list manipulations, the use of the priotree is disabled when there are no schedulers to use it. v2: Create a dedicated slab for i915_dependency. Rename the lists. Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com> Link: http://patchwork.freedesktop.org/patch/msgid/20161114204105.29171-7-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/i915_gem_request.c')
-rw-r--r--drivers/gpu/drm/i915/i915_gem_request.c91
1 files changed, 90 insertions, 1 deletions
diff --git a/drivers/gpu/drm/i915/i915_gem_request.c b/drivers/gpu/drm/i915/i915_gem_request.c
index 1118cf48d6f0..78c87d94d205 100644
--- a/drivers/gpu/drm/i915/i915_gem_request.c
+++ b/drivers/gpu/drm/i915/i915_gem_request.c
@@ -113,6 +113,77 @@ i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
113 spin_unlock(&file_priv->mm.lock); 113 spin_unlock(&file_priv->mm.lock);
114} 114}
115 115
116static struct i915_dependency *
117i915_dependency_alloc(struct drm_i915_private *i915)
118{
119 return kmem_cache_alloc(i915->dependencies, GFP_KERNEL);
120}
121
122static void
123i915_dependency_free(struct drm_i915_private *i915,
124 struct i915_dependency *dep)
125{
126 kmem_cache_free(i915->dependencies, dep);
127}
128
129static void
130__i915_priotree_add_dependency(struct i915_priotree *pt,
131 struct i915_priotree *signal,
132 struct i915_dependency *dep,
133 unsigned long flags)
134{
135 list_add(&dep->wait_link, &signal->waiters_list);
136 list_add(&dep->signal_link, &pt->signalers_list);
137 dep->signaler = signal;
138 dep->flags = flags;
139}
140
141static int
142i915_priotree_add_dependency(struct drm_i915_private *i915,
143 struct i915_priotree *pt,
144 struct i915_priotree *signal)
145{
146 struct i915_dependency *dep;
147
148 dep = i915_dependency_alloc(i915);
149 if (!dep)
150 return -ENOMEM;
151
152 __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC);
153 return 0;
154}
155
156static void
157i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt)
158{
159 struct i915_dependency *dep, *next;
160
161 /* Everyone we depended upon (the fences we wait to be signaled)
162 * should retire before us and remove themselves from our list.
163 * However, retirement is run independently on each timeline and
164 * so we may be called out-of-order.
165 */
166 list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) {
167 list_del(&dep->wait_link);
168 if (dep->flags & I915_DEPENDENCY_ALLOC)
169 i915_dependency_free(i915, dep);
170 }
171
172 /* Remove ourselves from everyone who depends upon us */
173 list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) {
174 list_del(&dep->signal_link);
175 if (dep->flags & I915_DEPENDENCY_ALLOC)
176 i915_dependency_free(i915, dep);
177 }
178}
179
180static void
181i915_priotree_init(struct i915_priotree *pt)
182{
183 INIT_LIST_HEAD(&pt->signalers_list);
184 INIT_LIST_HEAD(&pt->waiters_list);
185}
186
116void i915_gem_retire_noop(struct i915_gem_active *active, 187void i915_gem_retire_noop(struct i915_gem_active *active,
117 struct drm_i915_gem_request *request) 188 struct drm_i915_gem_request *request)
118{ 189{
@@ -182,6 +253,8 @@ static void i915_gem_request_retire(struct drm_i915_gem_request *request)
182 i915_gem_context_put(request->ctx); 253 i915_gem_context_put(request->ctx);
183 254
184 dma_fence_signal(&request->fence); 255 dma_fence_signal(&request->fence);
256
257 i915_priotree_fini(request->i915, &request->priotree);
185 i915_gem_request_put(request); 258 i915_gem_request_put(request);
186} 259}
187 260
@@ -467,6 +540,8 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
467 */ 540 */
468 i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq); 541 i915_sw_fence_await_sw_fence(&req->execute, &req->submit, &req->execq);
469 542
543 i915_priotree_init(&req->priotree);
544
470 INIT_LIST_HEAD(&req->active_list); 545 INIT_LIST_HEAD(&req->active_list);
471 req->i915 = dev_priv; 546 req->i915 = dev_priv;
472 req->engine = engine; 547 req->engine = engine;
@@ -520,6 +595,14 @@ i915_gem_request_await_request(struct drm_i915_gem_request *to,
520 595
521 GEM_BUG_ON(to == from); 596 GEM_BUG_ON(to == from);
522 597
598 if (to->engine->schedule) {
599 ret = i915_priotree_add_dependency(to->i915,
600 &to->priotree,
601 &from->priotree);
602 if (ret < 0)
603 return ret;
604 }
605
523 if (to->timeline == from->timeline) 606 if (to->timeline == from->timeline)
524 return 0; 607 return 0;
525 608
@@ -743,9 +826,15 @@ void __i915_add_request(struct drm_i915_gem_request *request, bool flush_caches)
743 826
744 prev = i915_gem_active_raw(&timeline->last_request, 827 prev = i915_gem_active_raw(&timeline->last_request,
745 &request->i915->drm.struct_mutex); 828 &request->i915->drm.struct_mutex);
746 if (prev) 829 if (prev) {
747 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, 830 i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
748 &request->submitq); 831 &request->submitq);
832 if (engine->schedule)
833 __i915_priotree_add_dependency(&request->priotree,
834 &prev->priotree,
835 &request->dep,
836 0);
837 }
749 838
750 spin_lock_irq(&timeline->lock); 839 spin_lock_irq(&timeline->lock);
751 list_add_tail(&request->link, &timeline->requests); 840 list_add_tail(&request->link, &timeline->requests);