-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c          4
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h              2
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c              6
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_context.c     29
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c        2
-rw-r--r--  drivers/gpu/drm/i915/i915_guc_submission.c   6
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c            17
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h      1
8 files changed, 31 insertions, 36 deletions
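
The hunks below replace the per-engine intel_engine_cs::default_context pointer with a single drm_i915_private::kernel_context, so every caller now looks the default (kernel) context up on the device-private structure instead of on an individual ring, and i915_gem_context_fini() can drop it with one NULL assignment. As a rough illustration only (simplified stand-in structs, not the real i915 types), the before/after access pattern looks roughly like this:

/*
 * Illustrative sketch only: stand-in structs, not the real i915 types.
 * Before the patch every engine carried its own default_context pointer;
 * after it, one kernel_context pointer lives on the device-private struct.
 */
#include <stdio.h>

struct intel_context { int hw_id; };

struct engine_before {            /* old layout: one pointer per engine */
	struct intel_context *default_context;
};

struct device_after {             /* new layout: one pointer per device */
	struct intel_context *kernel_context;
};

int main(void)
{
	struct intel_context ctx = { .hw_id = 0 };
	struct engine_before rcs = { .default_context = &ctx };
	struct device_after dev_priv = { .kernel_context = &ctx };

	printf("before: ring->default_context -> hw_id %d\n",
	       rcs.default_context->hw_id);
	printf("after:  dev_priv->kernel_context -> hw_id %d\n",
	       dev_priv.kernel_context->hw_id);
	return 0;
}

Storing the pointer once on dev_priv avoids keeping I915_NUM_RINGS copies of the same reference, which is why the per-ring loop in i915_gem_context_init() collapses to a single assignment below.
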
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0b3550f05026..37c2c5009d9a 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -1961,7 +1961,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
 		seq_puts(m, "HW context ");
 		describe_ctx(m, ctx);
 		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context == ctx)
+			if (dev_priv->kernel_context == ctx)
 				seq_printf(m, "(default context %s) ",
 					   ring->name);
 		}
@@ -2058,7 +2058,7 @@ static int i915_dump_lrc(struct seq_file *m, void *unused)
 
 	list_for_each_entry(ctx, &dev_priv->context_list, link) {
 		for_each_ring(ring, dev_priv, i) {
-			if (ring->default_context != ctx)
+			if (dev_priv->kernel_context != ctx)
 				i915_dump_lrc_obj(m, ctx, ring);
 		}
 	}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 125659488756..840368de3f4f 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1948,6 +1948,8 @@ struct drm_i915_private {
 		void (*stop_ring)(struct intel_engine_cs *ring);
 	} gt;
 
+	struct intel_context *kernel_context;
+
 	bool edp_low_vswing;
 
 	/* perform PHY state sanity checks? */
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8e716b6b5d59..06abe1bf5afc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2680,7 +2680,7 @@ void i915_gem_request_free(struct kref *req_ref)
 
 	if (ctx) {
 		if (i915.enable_execlists) {
-			if (ctx != req->ring->default_context)
+			if (ctx != req->i915->kernel_context)
 				intel_lr_context_unpin(req);
 		}
 
@@ -2776,7 +2776,7 @@ i915_gem_request_alloc(struct intel_engine_cs *engine,
 	int err;
 
 	if (ctx == NULL)
-		ctx = engine->default_context;
+		ctx = to_i915(engine->dev)->kernel_context;
 	err = __i915_gem_request_alloc(engine, ctx, &req);
 	return err ? ERR_PTR(err) : req;
 }
@@ -4864,7 +4864,7 @@ i915_gem_init_hw(struct drm_device *dev)
 	 */
 	init_unused_rings(dev);
 
-	BUG_ON(!dev_priv->ring[RCS].default_context);
+	BUG_ON(!dev_priv->kernel_context);
 
 	ret = i915_ppgtt_init_hw(dev);
 	if (ret) {
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index c25083c78ba7..6a4f64b03db6 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -347,22 +347,20 @@ void i915_gem_context_reset(struct drm_device *dev)
 			i915_gem_context_unreference(lctx);
 			ring->last_context = NULL;
 		}
-
-		/* Force the GPU state to be reinitialised on enabling */
-		if (ring->default_context)
-			ring->default_context->legacy_hw_ctx.initialized = false;
 	}
+
+	/* Force the GPU state to be reinitialised on enabling */
+	dev_priv->kernel_context->legacy_hw_ctx.initialized = false;
 }
 
 int i915_gem_context_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_context *ctx;
-	int i;
 
 	/* Init should only be called once per module load. Eventually the
 	 * restriction on the context_disabled check can be loosened. */
-	if (WARN_ON(dev_priv->ring[RCS].default_context))
+	if (WARN_ON(dev_priv->kernel_context))
 		return 0;
 
 	if (intel_vgpu_active(dev) && HAS_LOGICAL_RING_CONTEXTS(dev)) {
@@ -392,12 +390,7 @@ int i915_gem_context_init(struct drm_device *dev)
 		return PTR_ERR(ctx);
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
-		struct intel_engine_cs *ring = &dev_priv->ring[i];
-
-		/* NB: RCS will hold a ref for all rings */
-		ring->default_context = ctx;
-	}
+	dev_priv->kernel_context = ctx;
 
 	DRM_DEBUG_DRIVER("%s context support initialized\n",
 			i915.enable_execlists ? "LR" :
@@ -408,7 +401,7 @@ int i915_gem_context_init(struct drm_device *dev)
 void i915_gem_context_fini(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_context *dctx = dev_priv->ring[RCS].default_context;
+	struct intel_context *dctx = dev_priv->kernel_context;
 	int i;
 
 	if (dctx->legacy_hw_ctx.rcs_state) {
@@ -435,17 +428,17 @@ void i915_gem_context_fini(struct drm_device *dev)
 		i915_gem_object_ggtt_unpin(dctx->legacy_hw_ctx.rcs_state);
 	}
 
-	for (i = 0; i < I915_NUM_RINGS; i++) {
+	for (i = I915_NUM_RINGS; --i >= 0;) {
 		struct intel_engine_cs *ring = &dev_priv->ring[i];
 
-		if (ring->last_context)
+		if (ring->last_context) {
 			i915_gem_context_unreference(ring->last_context);
-
-		ring->default_context = NULL;
-		ring->last_context = NULL;
+			ring->last_context = NULL;
+		}
 	}
 
 	i915_gem_context_unreference(dctx);
+	dev_priv->kernel_context = NULL;
 }
 
 int i915_gem_context_enable(struct drm_i915_gem_request *req)
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 06ca4082735b..7eeb24427785 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1050,7 +1050,7 @@ static void i915_gem_record_rings(struct drm_device *dev,
 			if (request)
 				rbuf = request->ctx->engine[ring->id].ringbuf;
 			else
-				rbuf = ring->default_context->engine[ring->id].ringbuf;
+				rbuf = dev_priv->kernel_context->engine[ring->id].ringbuf;
 		} else
 			rbuf = ring->buffer;
 
diff --git a/drivers/gpu/drm/i915/i915_guc_submission.c b/drivers/gpu/drm/i915/i915_guc_submission.c
index 9c244247c13e..51ae5c1f806d 100644
--- a/drivers/gpu/drm/i915/i915_guc_submission.c
+++ b/drivers/gpu/drm/i915/i915_guc_submission.c
@@ -964,7 +964,7 @@ int i915_guc_submission_enable(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_guc *guc = &dev_priv->guc;
-	struct intel_context *ctx = dev_priv->ring[RCS].default_context;
+	struct intel_context *ctx = dev_priv->kernel_context;
 	struct i915_guc_client *client;
 
 	/* client for execbuf submission */
@@ -1021,7 +1021,7 @@ int intel_guc_suspend(struct drm_device *dev)
 	if (!i915.enable_guc_submission)
 		return 0;
 
-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_ENTER_S_STATE;
 	/* any value greater than GUC_POWER_D0 */
@@ -1047,7 +1047,7 @@ int intel_guc_resume(struct drm_device *dev)
 	if (!i915.enable_guc_submission)
 		return 0;
 
-	ctx = dev_priv->ring[RCS].default_context;
+	ctx = dev_priv->kernel_context;
 
 	data[0] = HOST2GUC_ACTION_EXIT_S_STATE;
 	data[1] = GUC_POWER_D0;
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ec2482daffa6..2c6da4013b1a 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -598,7 +598,7 @@ static int execlists_context_queue(struct drm_i915_gem_request *request)
 	struct drm_i915_gem_request *cursor;
 	int num_elements = 0;
 
-	if (request->ctx != ring->default_context)
+	if (request->ctx != request->i915->kernel_context)
 		intel_lr_context_pin(request);
 
 	i915_gem_request_reference(request);
@@ -690,7 +690,7 @@ int intel_logical_ring_alloc_request_extras(struct drm_i915_gem_request *request
 
 	request->ringbuf = request->ctx->engine[request->ring->id].ringbuf;
 
-	if (request->ctx != request->ring->default_context) {
+	if (request->ctx != request->i915->kernel_context) {
 		ret = intel_lr_context_pin(request);
 		if (ret)
 			return ret;
@@ -1006,7 +1006,7 @@ void intel_execlists_retire_requests(struct intel_engine_cs *ring)
 		struct drm_i915_gem_object *ctx_obj =
 				ctx->engine[ring->id].state;
 
-		if (ctx_obj && (ctx != ring->default_context))
+		if (ctx_obj && (ctx != req->i915->kernel_context))
 			intel_lr_context_unpin(req);
 		list_del(&req->execlist_link);
 		i915_gem_request_unreference(req);
@@ -1529,7 +1529,7 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
 	u8 next_context_status_buffer_hw;
 
 	lrc_setup_hardware_status_page(ring,
-			ring->default_context->engine[ring->id].state);
+			dev_priv->kernel_context->engine[ring->id].state);
 
 	I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
 	I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
@@ -2005,6 +2005,7 @@ logical_ring_default_irqs(struct intel_engine_cs *ring, unsigned shift)
 static int
 logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 {
+	struct intel_context *dctx = to_i915(dev)->kernel_context;
 	int ret;
 
 	/* Intentionally left blank. */
@@ -2027,12 +2028,12 @@ logical_ring_init(struct drm_device *dev, struct intel_engine_cs *ring)
 	if (ret)
 		goto error;
 
-	ret = intel_lr_context_deferred_alloc(ring->default_context, ring);
+	ret = intel_lr_context_deferred_alloc(dctx, ring);
 	if (ret)
 		goto error;
 
 	/* As this is the default context, always pin it */
-	ret = intel_lr_context_do_pin(ring, ring->default_context);
+	ret = intel_lr_context_do_pin(ring, dctx);
 	if (ret) {
 		DRM_ERROR(
 			"Failed to pin and map ringbuffer %s: %d\n",
@@ -2398,7 +2399,7 @@ void intel_lr_context_free(struct intel_context *ctx)
 					ctx->engine[i].ringbuf;
 			struct intel_engine_cs *ring = ringbuf->ring;
 
-			if (ctx == ring->default_context) {
+			if (ctx == ctx->i915->kernel_context) {
 				intel_unpin_ringbuffer_obj(ringbuf);
 				i915_gem_object_ggtt_unpin(ctx_obj);
 			}
@@ -2517,7 +2518,7 @@ int intel_lr_context_deferred_alloc(struct intel_context *ctx,
 	ctx->engine[ring->id].ringbuf = ringbuf;
 	ctx->engine[ring->id].state = ctx_obj;
 
-	if (ctx != ring->default_context && ring->init_context) {
+	if (ctx != ctx->i915->kernel_context && ring->init_context) {
 		struct drm_i915_gem_request *req;
 
 		req = i915_gem_request_alloc(ring, ctx);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 91ac8a9bd903..0014fcaa5a0c 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -309,7 +309,6 @@ struct intel_engine_cs {
 
 	wait_queue_head_t irq_queue;
 
-	struct intel_context *default_context;
 	struct intel_context *last_context;
 
 	struct intel_ring_hangcheck hangcheck;