path: root/drivers/gpu/drm/i915/i915_request.c
author    Chris Wilson <chris@chris-wilson.co.uk>    2018-04-30 09:15:00 -0400
committer Chris Wilson <chris@chris-wilson.co.uk>    2018-04-30 11:01:11 -0400
commit    52d7f16e5543ca892ae2393a716083d209ce3b36 (patch)
tree      d80f2a23732005d8c9653dfd9f0af57fb89448c3 /drivers/gpu/drm/i915/i915_request.c
parent    5692251c254a3d561316c4e8e10c77e470b60658 (diff)
drm/i915: Stop tracking timeline->inflight_seqnos
In commit 9b6586ae9f6b ("drm/i915: Keep a global seqno per-engine"), we
moved from a global inflight counter to per-engine counters in the hope
that it would be easier to run concurrently in future. However, with the
advent of the desire to move requests between engines, we do need a
global counter to preserve the semantics that no engine wraps in the
middle of a submit. (Although this semantic is now only required for
gen7 semaphore support, which only supports greater-than comparisons!)

v2: Keep a global counter of all requests ever submitted and force the
reset when it wraps.

References: 9b6586ae9f6b ("drm/i915: Keep a global seqno per-engine")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180430131503.5375-1-chris@chris-wilson.co.uk
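The parenthetical about gen7 is the crux of the "no wrap mid-submit" requirement: the hardware semaphore can only do a plain greater-than style comparison on seqnos, so it cannot tolerate a wrap the way the driver's signed-difference comparison (in the style of the i915_seqno_passed() helper) can. Below is a minimal, self-contained sketch of the two comparisons, assuming 32-bit seqnos; it is illustrative user-space code, not anything from the driver.

/*
 * Illustrative sketch: why a seqno wrap in the middle of a submit breaks a
 * greater-than-only comparison (as gen7 semaphore hardware does), while a
 * signed-difference comparison stays correct across the wrap.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* wrap-safe comparison, in the style of the driver's i915_seqno_passed() */
static bool seqno_passed(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq1 - seq2) >= 0;
}

/* what a greater-than-only hardware compare effectively does */
static bool hw_semaphore_passed(uint32_t completed, uint32_t wait_for)
{
	return completed >= wait_for;
}

int main(void)
{
	uint32_t before_wrap = 0xfffffffe;	/* seqno handed out just before the wrap */
	uint32_t after_wrap  = 0x00000002;	/* seqno completed just after the wrap */

	/* software view: 0x2 did pass 0xfffffffe (it is four steps later) */
	printf("software: %d\n", seqno_passed(after_wrap, before_wrap));	/* 1 */

	/* hardware view: 0x2 >= 0xfffffffe is false, so the wait would hang */
	printf("hardware: %d\n", hw_semaphore_passed(after_wrap, before_wrap));	/* 0 */
	return 0;
}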
Diffstat (limited to 'drivers/gpu/drm/i915/i915_request.c')
-rw-r--r--    drivers/gpu/drm/i915/i915_request.c    33
1 file changed, 17 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index b692a9f7c357..b1993d4a1a53 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -241,6 +241,7 @@ static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno)
 			       sizeof(timeline->engine[id].global_sync));
 	}
 
+	i915->gt.request_serial = seqno;
 	return 0;
 }
 
@@ -257,18 +258,22 @@ int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno)
 	return reset_all_global_seqno(i915, seqno - 1);
 }
 
-static int reserve_engine(struct intel_engine_cs *engine)
+static int reserve_gt(struct drm_i915_private *i915)
 {
-	struct drm_i915_private *i915 = engine->i915;
-	u32 active = ++engine->timeline->inflight_seqnos;
-	u32 seqno = engine->timeline->seqno;
 	int ret;
 
-	/* Reservation is fine until we need to wrap around */
-	if (unlikely(add_overflows(seqno, active))) {
+	/*
+	 * Reservation is fine until we may need to wrap around
+	 *
+	 * By incrementing the serial for every request, we know that no
+	 * individual engine may exceed that serial (as each is reset to 0
+	 * on any wrap). This protects even the most pessimistic of migrations
+	 * of every request from all engines onto just one.
+	 */
+	while (unlikely(++i915->gt.request_serial == 0)) {
 		ret = reset_all_global_seqno(i915, 0);
 		if (ret) {
-			engine->timeline->inflight_seqnos--;
+			i915->gt.request_serial--;
 			return ret;
 		}
 	}
@@ -279,15 +284,10 @@ static int reserve_engine(struct intel_engine_cs *engine)
 	return 0;
 }
 
-static void unreserve_engine(struct intel_engine_cs *engine)
+static void unreserve_gt(struct drm_i915_private *i915)
 {
-	struct drm_i915_private *i915 = engine->i915;
-
 	if (!--i915->gt.active_requests)
 		i915_gem_park(i915);
-
-	GEM_BUG_ON(!engine->timeline->inflight_seqnos);
-	engine->timeline->inflight_seqnos--;
 }
 
 void i915_gem_retire_noop(struct i915_gem_active *active,
@@ -362,7 +362,6 @@ static void i915_request_retire(struct i915_request *request)
 	list_del_init(&request->link);
 	spin_unlock_irq(&engine->timeline->lock);
 
-	unreserve_engine(request->engine);
 	advance_ring(request);
 
 	free_capture_list(request);
@@ -424,6 +423,8 @@ static void i915_request_retire(struct i915_request *request)
 	}
 	spin_unlock_irq(&request->lock);
 
+	unreserve_gt(request->i915);
+
 	i915_sched_node_fini(request->i915, &request->sched);
 	i915_request_put(request);
 }
@@ -642,7 +643,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 		return ERR_CAST(ring);
 	GEM_BUG_ON(!ring);
 
-	ret = reserve_engine(engine);
+	ret = reserve_gt(i915);
 	if (ret)
 		goto err_unpin;
 
@@ -784,7 +785,7 @@ err_unwind:
 
 	kmem_cache_free(i915->requests, rq);
 err_unreserve:
-	unreserve_engine(engine);
+	unreserve_gt(i915);
 err_unpin:
 	engine->context_unpin(engine, ctx);
 	return ERR_PTR(ret);
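As a follow-up illustration of the reservation pattern that reserve_gt() adopts above, here is a self-contained user-space sketch: a single 32-bit serial is bumped for every request, and the wrap back to zero is what forces the global seqno reset. All names here (request_serial, reset_all_seqno, reserve, main) are illustrative stand-ins, not the driver's symbols.

/*
 * Self-contained demonstration of the wrap-triggered reset: ++serial == 0
 * fires exactly once every 2^32 reservations, and the failure path undoes
 * the increment, mirroring the error handling in the patch.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t request_serial;	/* plays the role of i915->gt.request_serial */

static int reset_all_seqno(uint32_t seqno)
{
	/* stand-in for reset_all_global_seqno(): rewind every engine to seqno */
	printf("serial wrapped: resetting all engine seqnos to %u\n", seqno);
	return 0;
}

static int reserve(void)
{
	/* the wrap to 0 is the only case that needs the slow path */
	while (++request_serial == 0) {
		int ret = reset_all_seqno(0);
		if (ret) {
			request_serial--;	/* undo the reservation on failure */
			return ret;
		}
	}
	return 0;
}

int main(void)
{
	/* start just below the wrap so the reset path is exercised quickly */
	request_serial = UINT32_MAX - 2;

	for (int i = 0; i < 5; i++) {
		if (reserve() == 0)
			printf("request %d reserved, serial=%u\n", i, request_serial);
	}
	return 0;
}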