author		Chris Wilson <chris@chris-wilson.co.uk>	2018-05-17 17:26:30 -0400
committer	Chris Wilson <chris@chris-wilson.co.uk>	2018-05-18 04:35:17 -0400
commit		4e0d64dba816adf18c17488d38ede67a3d0e9b40 (patch)
tree		76243fca2f9e23ab2f127f6d5aa1fc636c83f0b5
parent		c8af5274c3cbacb0905a26bcdef85901216e1134 (diff)
drm/i915: Move request->ctx aside
In the next patch, we want to store the intel_context pointer inside
i915_request, as it is frequently accessed via a convoluted dance when
submitting the request to hw. Having two context pointers inside
i915_request leads to confusion, so first rename the existing
i915_gem_context pointer to i915_request.gem_context.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180517212633.24934-1-chris@chris-wilson.co.uk
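For illustration, a minimal sketch of the struct layout this rename is
preparing for. This is not code from the patch: the hw_context name and
its placement are assumptions about the follow-up change, and the
forward declarations merely stand in for the real i915 types.

	/* Sketch only; "hw_context" is an assumed name for the next patch. */
	struct i915_gem_context;	/* userspace-visible GEM context */
	struct intel_context;		/* per-engine hardware context */

	struct i915_request {
		struct i915_gem_context *gem_context;	/* renamed from "ctx" by this patch */
		struct intel_context *hw_context;	/* assumed addition in the next patch */
		/* ... remaining members unchanged ... */
	};

With both pointers present, call sites that mean the GEM context spell
it out as rq->gem_context, leaving no ambiguity with the hardware
context the next patch caches.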
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c			4
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c			4
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c				10
-rw-r--r--	drivers/gpu/drm/i915/i915_gpu_error.c			18
-rw-r--r--	drivers/gpu/drm/i915/i915_request.c			12
-rw-r--r--	drivers/gpu/drm/i915/i915_request.h			2
-rw-r--r--	drivers/gpu/drm/i915/i915_trace.h			10
-rw-r--r--	drivers/gpu/drm/i915/intel_engine_cs.c			2
-rw-r--r--	drivers/gpu/drm/i915/intel_guc_submission.c		7
-rw-r--r--	drivers/gpu/drm/i915/intel_lrc.c			31
-rw-r--r--	drivers/gpu/drm/i915/intel_ringbuffer.c			12
-rw-r--r--	drivers/gpu/drm/i915/selftests/intel_hangcheck.c	5
-rw-r--r--	drivers/gpu/drm/i915/selftests/intel_lrc.c		2
13 files changed, 64 insertions, 55 deletions
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index c2d183b91500..17f9f8d7e148 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -205,7 +205,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 
 static inline bool is_gvt_request(struct i915_request *req)
 {
-	return i915_gem_context_force_single_submission(req->ctx);
+	return i915_gem_context_force_single_submission(req->gem_context);
 }
 
 static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
@@ -305,7 +305,7 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	struct i915_request *req = workload->req;
 
 	if (IS_KABYLAKE(req->i915) &&
-	    is_inhibit_context(req->ctx, req->engine->id))
+	    is_inhibit_context(req->gem_context, req->engine->id))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
 
 	/* allocate shadow ring buffer */
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d78beaabc051..52515445ac40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -542,8 +542,8 @@ static int i915_gem_object_info(struct seq_file *m, void *data)
 						   struct i915_request,
 						   client_link);
 		rcu_read_lock();
-		task = pid_task(request && request->ctx->pid ?
-				request->ctx->pid : file->pid,
+		task = pid_task(request && request->gem_context->pid ?
+				request->gem_context->pid : file->pid,
 				PIDTYPE_PID);
 		print_file_stats(m, task ? task->comm : "<unknown>", stats);
 		rcu_read_unlock();
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b0fe452ce17c..a20f8db5729d 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3067,7 +3067,7 @@ static void skip_request(struct i915_request *request)
 static void engine_skip_context(struct i915_request *request)
 {
 	struct intel_engine_cs *engine = request->engine;
-	struct i915_gem_context *hung_ctx = request->ctx;
+	struct i915_gem_context *hung_ctx = request->gem_context;
 	struct i915_timeline *timeline = request->timeline;
 	unsigned long flags;
 
@@ -3077,7 +3077,7 @@ static void engine_skip_context(struct i915_request *request)
 	spin_lock_nested(&timeline->lock, SINGLE_DEPTH_NESTING);
 
 	list_for_each_entry_continue(request, &engine->timeline.requests, link)
-		if (request->ctx == hung_ctx)
+		if (request->gem_context == hung_ctx)
 			skip_request(request);
 
 	list_for_each_entry(request, &timeline->requests, link)
@@ -3123,11 +3123,11 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 	}
 
 	if (stalled) {
-		i915_gem_context_mark_guilty(request->ctx);
+		i915_gem_context_mark_guilty(request->gem_context);
 		skip_request(request);
 
 		/* If this context is now banned, skip all pending requests. */
-		if (i915_gem_context_is_banned(request->ctx))
+		if (i915_gem_context_is_banned(request->gem_context))
 			engine_skip_context(request);
 	} else {
 		/*
@@ -3137,7 +3137,7 @@ i915_gem_reset_request(struct intel_engine_cs *engine,
 	 */
 	request = i915_gem_find_active_request(engine);
 	if (request) {
-		i915_gem_context_mark_innocent(request->ctx);
+		i915_gem_context_mark_innocent(request->gem_context);
 		dma_fence_set_error(&request->fence, -EAGAIN);
 
 		/* Rewind the engine to replay the incomplete rq */
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index efb808272460..37c9a42654ba 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -1287,9 +1287,11 @@ static void error_record_engine_registers(struct i915_gpu_state *error,
 static void record_request(struct i915_request *request,
 			   struct drm_i915_error_request *erq)
 {
-	erq->context = request->ctx->hw_id;
+	struct i915_gem_context *ctx = request->gem_context;
+
+	erq->context = ctx->hw_id;
 	erq->sched_attr = request->sched.attr;
-	erq->ban_score = atomic_read(&request->ctx->ban_score);
+	erq->ban_score = atomic_read(&ctx->ban_score);
 	erq->seqno = request->global_seqno;
 	erq->jiffies = request->emitted_jiffies;
 	erq->start = i915_ggtt_offset(request->ring->vma);
@@ -1297,7 +1299,7 @@ static void record_request(struct i915_request *request,
 	erq->tail = request->tail;
 
 	rcu_read_lock();
-	erq->pid = request->ctx->pid ? pid_nr(request->ctx->pid) : 0;
+	erq->pid = ctx->pid ? pid_nr(ctx->pid) : 0;
 	rcu_read_unlock();
 }
 
@@ -1461,12 +1463,12 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
 		request = i915_gem_find_active_request(engine);
 		if (request) {
+			struct i915_gem_context *ctx = request->gem_context;
 			struct intel_ring *ring;
 
-			ee->vm = request->ctx->ppgtt ?
-				&request->ctx->ppgtt->base : &ggtt->base;
+			ee->vm = ctx->ppgtt ? &ctx->ppgtt->base : &ggtt->base;
 
-			record_context(&ee->context, request->ctx);
+			record_context(&ee->context, ctx);
 
 			/* We need to copy these to an anonymous buffer
 			 * as the simplest method to avoid being overwritten
@@ -1483,11 +1485,11 @@ static void gem_record_rings(struct i915_gpu_state *error)
 
 			ee->ctx =
 				i915_error_object_create(i915,
-							 to_intel_context(request->ctx,
+							 to_intel_context(ctx,
 									  engine)->state);
 
 			error->simulated |=
-				i915_gem_context_no_error_capture(request->ctx);
+				i915_gem_context_no_error_capture(ctx);
 
 			ee->rq_head = request->head;
 			ee->rq_post = request->postfix;
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 8928894dd9c7..fe8810a6a339 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -384,7 +384,7 @@ static void __retire_engine_request(struct intel_engine_cs *engine,
 	 */
 	if (engine->last_retired_context)
 		intel_context_unpin(engine->last_retired_context, engine);
-	engine->last_retired_context = rq->ctx;
+	engine->last_retired_context = rq->gem_context;
 }
 
 static void __retire_engine_upto(struct intel_engine_cs *engine,
@@ -455,8 +455,8 @@ static void i915_request_retire(struct i915_request *request)
 	i915_request_remove_from_client(request);
 
 	/* Retirement decays the ban score as it is a sign of ctx progress */
-	atomic_dec_if_positive(&request->ctx->ban_score);
-	intel_context_unpin(request->ctx, request->engine);
+	atomic_dec_if_positive(&request->gem_context->ban_score);
+	intel_context_unpin(request->gem_context, request->engine);
 
 	__retire_engine_upto(request->engine, request);
 
@@ -760,7 +760,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 	INIT_LIST_HEAD(&rq->active_list);
 	rq->i915 = i915;
 	rq->engine = engine;
-	rq->ctx = ctx;
+	rq->gem_context = ctx;
 	rq->ring = ring;
 	rq->timeline = ring->timeline;
 	GEM_BUG_ON(rq->timeline == &engine->timeline);
@@ -814,7 +814,7 @@ i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
 		goto err_unwind;
 
 	/* Keep a second pin for the dual retirement along engine and ring */
-	__intel_context_pin(rq->ctx, engine);
+	__intel_context_pin(rq->gem_context, engine);
 
 	/* Check that we didn't interrupt ourselves with a new request */
 	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
@@ -1113,7 +1113,7 @@ void __i915_request_add(struct i915_request *request, bool flush_caches)
 	local_bh_disable();
 	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
 	if (engine->schedule)
-		engine->schedule(request, &request->ctx->sched);
+		engine->schedule(request, &request->gem_context->sched);
 	rcu_read_unlock();
 	i915_sw_fence_commit(&request->submit);
 	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index eddbd4245cb3..dddecd9ffd0c 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -93,7 +93,7 @@ struct i915_request {
 	 * i915_request_free() will then decrement the refcount on the
 	 * context.
 	 */
-	struct i915_gem_context *ctx;
+	struct i915_gem_context *gem_context;
 	struct intel_engine_cs *engine;
 	struct intel_ring *ring;
 	struct i915_timeline *timeline;
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 8cc3a256f29d..5d4f78765083 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -624,7 +624,7 @@ TRACE_EVENT(i915_request_queue,
 
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->ctx->hw_id;
+			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->ring = rq->engine->id;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
@@ -651,7 +651,7 @@ DECLARE_EVENT_CLASS(i915_request,
 
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->ctx->hw_id;
+			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->ring = rq->engine->id;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
@@ -696,7 +696,7 @@ TRACE_EVENT(i915_request_in,
 
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->ctx->hw_id;
+			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->ring = rq->engine->id;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
@@ -727,7 +727,7 @@ TRACE_EVENT(i915_request_out,
 
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->ctx->hw_id;
+			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->ring = rq->engine->id;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
@@ -815,7 +815,7 @@ TRACE_EVENT(i915_request_wait_begin,
 	     */
 	    TP_fast_assign(
 			   __entry->dev = rq->i915->drm.primary->index;
-			   __entry->hw_id = rq->ctx->hw_id;
+			   __entry->hw_id = rq->gem_context->hw_id;
 			   __entry->ring = rq->engine->id;
 			   __entry->ctx = rq->fence.context;
 			   __entry->seqno = rq->fence.seqno;
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index e78c6e769e8c..7983b8a1ad44 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -1020,7 +1020,7 @@ bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
 	 */
 	rq = __i915_gem_active_peek(&engine->timeline.last_request);
 	if (rq)
-		return rq->ctx == kernel_context;
+		return rq->gem_context == kernel_context;
 	else
 		return engine->last_retired_context == kernel_context;
 }
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 637e852888ec..a432a193f3c4 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -513,8 +513,9 @@ static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 {
 	struct intel_guc_client *client = guc->execbuf_client;
 	struct intel_engine_cs *engine = rq->engine;
-	u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
-								 engine));
+	u32 ctx_desc =
+		lower_32_bits(intel_lr_context_descriptor(rq->gem_context,
+							  engine));
 	u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 
 	spin_lock(&client->wq_lock);
@@ -725,7 +726,7 @@ static bool __guc_dequeue(struct intel_engine_cs *engine)
 		struct i915_request *rq, *rn;
 
 		list_for_each_entry_safe(rq, rn, &p->requests, sched.link) {
-			if (last && rq->ctx != last->ctx) {
+			if (last && rq->gem_context != last->gem_context) {
 				if (port == last_port) {
 					__list_del_many(&p->requests,
 							&rq->sched.link);
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index ae5adad7cc63..1e9cc55d785c 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -418,9 +418,10 @@ execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
 
 static u64 execlists_update_context(struct i915_request *rq)
 {
-	struct intel_context *ce = to_intel_context(rq->ctx, rq->engine);
+	struct intel_context *ce =
+		to_intel_context(rq->gem_context, rq->engine);
 	struct i915_hw_ppgtt *ppgtt =
-		rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
+		rq->gem_context->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
 	u32 *reg_state = ce->lrc_reg_state;
 
 	reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
@@ -679,7 +680,8 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
 			 * second request, and so we never need to tell the
 			 * hardware about the first.
 			 */
-			if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
+			if (last && !can_merge_ctx(rq->gem_context,
+						   last->gem_context)) {
 				/*
 				 * If we are on the second port and cannot
 				 * combine this request with the last, then we
@@ -698,14 +700,14 @@ static bool __execlists_dequeue(struct intel_engine_cs *engine)
 				 * the same context (even though a different
 				 * request) to the second port.
 				 */
-				if (ctx_single_port_submission(last->ctx) ||
-				    ctx_single_port_submission(rq->ctx)) {
+				if (ctx_single_port_submission(last->gem_context) ||
+				    ctx_single_port_submission(rq->gem_context)) {
 					__list_del_many(&p->requests,
 							&rq->sched.link);
 					goto done;
 				}
 
-				GEM_BUG_ON(last->ctx == rq->ctx);
+				GEM_BUG_ON(last->gem_context == rq->gem_context);
 
 				if (submit)
 					port_assign(port, last);
@@ -1437,7 +1439,7 @@ static void execlists_context_unpin(struct intel_engine_cs *engine,
 static int execlists_request_alloc(struct i915_request *request)
 {
 	struct intel_context *ce =
-		to_intel_context(request->ctx, request->engine);
+		to_intel_context(request->gem_context, request->engine);
 	int ret;
 
 	GEM_BUG_ON(!ce->pin_count);
@@ -1954,7 +1956,7 @@ static void execlists_reset(struct intel_engine_cs *engine,
 	 * future request will be after userspace has had the opportunity
 	 * to recreate its own state.
 	 */
-	regs = to_intel_context(request->ctx, engine)->lrc_reg_state;
+	regs = to_intel_context(request->gem_context, engine)->lrc_reg_state;
 	if (engine->default_state) {
 		void *defaults;
 
@@ -1967,7 +1969,8 @@ static void execlists_reset(struct intel_engine_cs *engine,
 			i915_gem_object_unpin_map(engine->default_state);
 		}
 	}
-	execlists_init_reg_state(regs, request->ctx, engine, request->ring);
+	execlists_init_reg_state(regs,
+				 request->gem_context, engine, request->ring);
 
 	/* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
 	regs[CTX_RING_BUFFER_START + 1] = i915_ggtt_offset(request->ring->vma);
@@ -1989,7 +1992,7 @@ static void execlists_reset_finish(struct intel_engine_cs *engine)
 
 static int intel_logical_ring_emit_pdps(struct i915_request *rq)
 {
-	struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt;
+	struct i915_hw_ppgtt *ppgtt = rq->gem_context->ppgtt;
 	struct intel_engine_cs *engine = rq->engine;
 	const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
 	u32 *cs;
@@ -2028,15 +2031,15 @@ static int gen8_emit_bb_start(struct i915_request *rq,
 	 * it is unsafe in case of lite-restore (because the ctx is
 	 * not idle). PML4 is allocated during ppgtt init so this is
 	 * not needed in 48-bit.*/
-	if (rq->ctx->ppgtt &&
-	    (intel_engine_flag(rq->engine) & rq->ctx->ppgtt->pd_dirty_rings) &&
-	    !i915_vm_is_48bit(&rq->ctx->ppgtt->base) &&
+	if (rq->gem_context->ppgtt &&
+	    (intel_engine_flag(rq->engine) & rq->gem_context->ppgtt->pd_dirty_rings) &&
+	    !i915_vm_is_48bit(&rq->gem_context->ppgtt->base) &&
 	    !intel_vgpu_active(rq->i915)) {
 		ret = intel_logical_ring_emit_pdps(rq);
 		if (ret)
 			return ret;
 
-		rq->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
+		rq->gem_context->ppgtt->pd_dirty_rings &= ~intel_engine_flag(rq->engine);
 	}
 
 	cs = intel_ring_begin(rq, 6);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 6f200a747176..53703012ec75 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -571,8 +571,8 @@ static void reset_ring(struct intel_engine_cs *engine,
 	 */
 	if (request) {
 		struct drm_i915_private *dev_priv = request->i915;
-		struct intel_context *ce = to_intel_context(request->ctx,
-							    engine);
+		struct intel_context *ce =
+			to_intel_context(request->gem_context, engine);
 		struct i915_hw_ppgtt *ppgtt;
 
 		if (ce->state) {
@@ -584,7 +584,7 @@ static void reset_ring(struct intel_engine_cs *engine,
 				   CCID_EN);
 		}
 
-		ppgtt = request->ctx->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
+		ppgtt = request->gem_context->ppgtt ?: engine->i915->mm.aliasing_ppgtt;
 		if (ppgtt) {
 			u32 pd_offset = ppgtt->pd.base.ggtt_offset << 10;
 
@@ -1458,7 +1458,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
 
 	*cs++ = MI_NOOP;
 	*cs++ = MI_SET_CONTEXT;
-	*cs++ = i915_ggtt_offset(to_intel_context(rq->ctx, engine)->state) | flags;
+	*cs++ = i915_ggtt_offset(to_intel_context(rq->gem_context, engine)->state) | flags;
 	/*
 	 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
 	 * WaMiSetContext_Hang:snb,ivb,vlv
@@ -1526,7 +1526,7 @@ static int remap_l3(struct i915_request *rq, int slice)
 static int switch_context(struct i915_request *rq)
 {
 	struct intel_engine_cs *engine = rq->engine;
-	struct i915_gem_context *to_ctx = rq->ctx;
+	struct i915_gem_context *to_ctx = rq->gem_context;
 	struct i915_hw_ppgtt *to_mm =
 		to_ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
 	struct i915_gem_context *from_ctx = engine->legacy_active_context;
@@ -1597,7 +1597,7 @@ static int ring_request_alloc(struct i915_request *request)
 {
 	int ret;
 
-	GEM_BUG_ON(!to_intel_context(request->ctx, request->engine)->pin_count);
+	GEM_BUG_ON(!to_intel_context(request->gem_context, request->engine)->pin_count);
 
 	/* Flush enough space to reduce the likelihood of waiting after
 	 * we start building the request - in which case we will just
diff --git a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
index 438e0b045a2c..2c4e77c050dc 100644
--- a/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
+++ b/drivers/gpu/drm/i915/selftests/intel_hangcheck.c
@@ -105,7 +105,10 @@ static int emit_recurse_batch(struct hang *h,
 			      struct i915_request *rq)
 {
 	struct drm_i915_private *i915 = h->i915;
-	struct i915_address_space *vm = rq->ctx->ppgtt ? &rq->ctx->ppgtt->base : &i915->ggtt.base;
+	struct i915_address_space *vm =
+		rq->gem_context->ppgtt ?
+		&rq->gem_context->ppgtt->base :
+		&i915->ggtt.base;
 	struct i915_vma *hws, *vma;
 	unsigned int flags;
 	u32 *batch;
diff --git a/drivers/gpu/drm/i915/selftests/intel_lrc.c b/drivers/gpu/drm/i915/selftests/intel_lrc.c
index 1b8a07125150..68cb9126b3e1 100644
--- a/drivers/gpu/drm/i915/selftests/intel_lrc.c
+++ b/drivers/gpu/drm/i915/selftests/intel_lrc.c
@@ -83,7 +83,7 @@ static int emit_recurse_batch(struct spinner *spin,
 			      struct i915_request *rq,
 			      u32 arbitration_command)
 {
-	struct i915_address_space *vm = &rq->ctx->ppgtt->base;
+	struct i915_address_space *vm = &rq->gem_context->ppgtt->base;
 	struct i915_vma *hws, *vma;
 	u32 *batch;
 	int err;