aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Wilson <chris@chris-wilson.co.uk>2018-09-19 16:54:32 -0400
committerChris Wilson <chris@chris-wilson.co.uk>2018-09-20 07:51:42 -0400
commit8fcd86baab7724196ae8605e58c38dd17d598ef9 (patch)
treee583d6d8740bf2f20e0df2cc94fb9d7e226878e8
parenta47cd45a37c94bb31ccf9bf6ec3d94cbcd57d6ee (diff)
drm/i915/guc: Restore preempt-context across S3/S4
Stolen memory is lost across S4 (hibernate) or S3-RST as it is a portion of ordinary volatile RAM. As we allocate our rings from stolen, this may include the rings used for our preempt context and their breadcrumb instructions. In order to allow preemption following hibernation and loss of stolen memory, we therefore need to repopulate the instructions inside the lost ring upon resume. To handle both module load and resume, we simply defer constructing the ring to first use. Testcase: igt/drv_selftest/live_gem Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk> Cc: Michał Winiarski <michal.winiarski@intel.com> Cc: Michal Wajdeczko <michal.wajdeczko@intel.com> Reviewed-by: Michał Winiarski <michal.winiarski@intel.com> Link: https://patchwork.freedesktop.org/patch/msgid/20180919205432.18394-1-chris@chris-wilson.co.uk
-rw-r--r-- drivers/gpu/drm/i915/intel_guc_submission.c | 80
1 file changed, 27 insertions(+), 53 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_guc_submission.c b/drivers/gpu/drm/i915/intel_guc_submission.c
index 07b9d313b019..a81f04d46e87 100644
--- a/drivers/gpu/drm/i915/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/intel_guc_submission.c
@@ -557,16 +557,36 @@ static void inject_preempt_context(struct work_struct *work)
557 preempt_work[engine->id]); 557 preempt_work[engine->id]);
558 struct intel_guc_client *client = guc->preempt_client; 558 struct intel_guc_client *client = guc->preempt_client;
559 struct guc_stage_desc *stage_desc = __get_stage_desc(client); 559 struct guc_stage_desc *stage_desc = __get_stage_desc(client);
560 u32 ctx_desc = lower_32_bits(to_intel_context(client->owner, 560 struct intel_context *ce = to_intel_context(client->owner, engine);
561 engine)->lrc_desc);
562 u32 data[7]; 561 u32 data[7];
563 562
564 /* 563 if (!ce->ring->emit) { /* recreate upon load/resume */
565 * The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP. 564 u32 addr = intel_hws_preempt_done_address(engine);
566 * See guc_fill_preempt_context(). 565 u32 *cs;
567 */ 566
567 cs = ce->ring->vaddr;
568 if (engine->id == RCS) {
569 cs = gen8_emit_ggtt_write_rcs(cs,
570 GUC_PREEMPT_FINISHED,
571 addr);
572 } else {
573 cs = gen8_emit_ggtt_write(cs,
574 GUC_PREEMPT_FINISHED,
575 addr);
576 *cs++ = MI_NOOP;
577 *cs++ = MI_NOOP;
578 }
579 *cs++ = MI_USER_INTERRUPT;
580 *cs++ = MI_NOOP;
581
582 ce->ring->emit = GUC_PREEMPT_BREADCRUMB_BYTES;
583 GEM_BUG_ON((void *)cs - ce->ring->vaddr != ce->ring->emit);
584
585 flush_ggtt_writes(ce->ring->vma);
586 }
587
568 spin_lock_irq(&client->wq_lock); 588 spin_lock_irq(&client->wq_lock);
569 guc_wq_item_append(client, engine->guc_id, ctx_desc, 589 guc_wq_item_append(client, engine->guc_id, lower_32_bits(ce->lrc_desc),
570 GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0); 590 GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
571 spin_unlock_irq(&client->wq_lock); 591 spin_unlock_irq(&client->wq_lock);
572 592
@@ -1044,50 +1064,6 @@ static inline bool ctx_save_restore_disabled(struct intel_context *ce)
1044#undef SR_DISABLED 1064#undef SR_DISABLED
1045} 1065}
1046 1066
1047static void guc_fill_preempt_context(struct intel_guc *guc)
1048{
1049 struct drm_i915_private *dev_priv = guc_to_i915(guc);
1050 struct intel_guc_client *client = guc->preempt_client;
1051 struct intel_engine_cs *engine;
1052 enum intel_engine_id id;
1053
1054 for_each_engine(engine, dev_priv, id) {
1055 struct intel_context *ce =
1056 to_intel_context(client->owner, engine);
1057 u32 addr = intel_hws_preempt_done_address(engine);
1058 u32 *cs;
1059
1060 GEM_BUG_ON(!ce->pin_count);
1061
1062 /*
1063 * We rely on this context image *not* being saved after
1064 * preemption. This ensures that the RING_HEAD / RING_TAIL
1065 * remain pointing at initial values forever.
1066 */
1067 GEM_BUG_ON(!ctx_save_restore_disabled(ce));
1068
1069 cs = ce->ring->vaddr;
1070 if (id == RCS) {
1071 cs = gen8_emit_ggtt_write_rcs(cs,
1072 GUC_PREEMPT_FINISHED,
1073 addr);
1074 } else {
1075 cs = gen8_emit_ggtt_write(cs,
1076 GUC_PREEMPT_FINISHED,
1077 addr);
1078 *cs++ = MI_NOOP;
1079 *cs++ = MI_NOOP;
1080 }
1081 *cs++ = MI_USER_INTERRUPT;
1082 *cs++ = MI_NOOP;
1083
1084 GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
1085 GUC_PREEMPT_BREADCRUMB_BYTES);
1086
1087 flush_ggtt_writes(ce->ring->vma);
1088 }
1089}
1090
1091static int guc_clients_create(struct intel_guc *guc) 1067static int guc_clients_create(struct intel_guc *guc)
1092{ 1068{
1093 struct drm_i915_private *dev_priv = guc_to_i915(guc); 1069 struct drm_i915_private *dev_priv = guc_to_i915(guc);
@@ -1118,8 +1094,6 @@ static int guc_clients_create(struct intel_guc *guc)
1118 return PTR_ERR(client); 1094 return PTR_ERR(client);
1119 } 1095 }
1120 guc->preempt_client = client; 1096 guc->preempt_client = client;
1121
1122 guc_fill_preempt_context(guc);
1123 } 1097 }
1124 1098
1125 return 0; 1099 return 0;