path: root/drivers/gpu/drm/i915/intel_ringbuffer.c
author	Chris Wilson <chris@chris-wilson.co.uk>	2017-11-10 09:26:32 -0500
committer	Chris Wilson <chris@chris-wilson.co.uk>	2017-11-10 12:20:29 -0500
commit	f4e15af7e21861445821d5f09922ef7e695269a1 (patch)
tree	c7c63a6c42cdffad551c625b01e7135a23a99fd0 /drivers/gpu/drm/i915/intel_ringbuffer.c
parent	d378a3efb819e6d1992127122d957337571b4594 (diff)
drm/i915: Mark the context state as dirty/written
In the next few patches, we will want to both copy out of the context image
and write a valid image into a new context. To be completely safe, we should
then couple in our domain tracking to ensure that we don't have any issues
with stale data remaining in unwanted cachelines.

Historically, we omitted the .write=true from the call to set-gtt-domain in
i915_switch_context() in order to avoid a stall between every request, as we
would have to wait for the previous context write from the GPU. Since then,
we limit the set-gtt-domain to only occur when we first bind the vma, so once
in use we will never stall, and we are sure to flush the context following a
load from swap.

Equally, we never applied the lessons learnt from ringbuffer submission to
execlists; so it is time to apply the flush of the lrc after load as well.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Acked-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171110142634.10551-6-chris@chris-wilson.co.uk
Diffstat (limited to 'drivers/gpu/drm/i915/intel_ringbuffer.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 47fadf8da84e..7e2a671882fb 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1363,12 +1363,13 @@ static int context_pin(struct i915_gem_context *ctx)
 	struct i915_vma *vma = ctx->engine[RCS].state;
 	int ret;
 
-	/* Clear this page out of any CPU caches for coherent swap-in/out.
+	/*
+	 * Clear this page out of any CPU caches for coherent swap-in/out.
 	 * We only want to do this on the first bind so that we do not stall
 	 * on an active context (which by nature is already on the GPU).
 	 */
 	if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
-		ret = i915_gem_object_set_to_gtt_domain(vma->obj, false);
+		ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
 		if (ret)
 			return ret;
 	}
@@ -1445,7 +1446,6 @@ intel_ring_context_pin(struct intel_engine_cs *engine,
 		if (ret)
 			goto err;
 
-		ce->state->obj->mm.dirty = true;
 		ce->state->obj->pin_global++;
 	}
 
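As a side note on the two hunks above: the reason the explicit
ce->state->obj->mm.dirty assignment can be dropped is that asking for a
writable GTT domain is expected to mark the backing object dirty itself, and
since the flush only happens on the first (global) bind, an already-active
context never stalls. The following is a minimal userspace sketch of that
relationship, assuming the driver's write path behaves as described; the
mock_* names are illustrative stand-ins, not the i915 API.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the context image object; not the i915 API. */
struct mock_obj {
	bool dirty;        /* pages must be written back before being discarded */
	bool gtt_write;    /* object is in the GTT write domain */
};

/*
 * Sketch of the assumed write-path behaviour: requesting a writable GTT
 * domain flushes stale CPU cachelines (elided here) and marks the object
 * dirty, which is why a separate "obj->mm.dirty = true" after pinning the
 * context image becomes redundant.
 */
static int mock_set_to_gtt_domain(struct mock_obj *obj, bool write)
{
	/* real driver: wait for outstanding rendering, clflush CPU caches */
	if (write) {
		obj->gtt_write = true;
		obj->dirty = true;
	}
	return 0;
}

int main(void)
{
	struct mock_obj ctx_state = { false, false };

	/* Done only on the first bind, so an active context never stalls. */
	if (mock_set_to_gtt_domain(&ctx_state, true))
		return 1;

	printf("context image dirty after pin: %s\n",
	       ctx_state.dirty ? "yes" : "no");
	return 0;
}

Under that assumption, the first hunk's switch from false to true and the
second hunk's removal of the manual dirty flag are two halves of the same
change.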