author		Zhi Wang <zhi.a.wang@intel.com>	2018-02-07 05:12:15 -0500
committer	Zhenyu Wang <zhenyuw@linux.intel.com>	2018-03-19 02:51:30 -0400
commit		b20c0d5ce1047ba03a6709a07f31f4d7178de35c (patch)
tree		3e3587c53e6ba66ad12ea57441db17c11896e4cc /drivers
parent		730c8ead53bf3011d33de69ff5a6cebf51e697b5 (diff)
drm/i915/gvt: Update PDPs after a vGPU mm object is pinned.
The PDPs of a shadow page table are only valid after the vGPU mm is
pinned, so the PDPs in the shadow context should be updated at that
point.

Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
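In effect, the patch moves the PDP write out of populate_shadow_context(),
which runs before the shadow mm is pinned, into a new helper that
prepare_workload() calls once pinning has succeeded. A minimal sketch of
the resulting ordering; the pin call is assumed here to be
intel_vgpu_pin_mm(), and everything unrelated in prepare_workload() is
elided:

/* Sketch only: the intel_vgpu_pin_mm() call and the surrounding error
 * handling are assumed from context, not taken from this diff. */
static int prepare_workload(struct intel_vgpu_workload *workload)
{
	int ret;

	/* Pinning populates the shadow page tables, so the PDPs in
	 * workload->shadow_mm->ppgtt_mm.shadow_pdps become valid only
	 * after this call succeeds. */
	ret = intel_vgpu_pin_mm(workload->shadow_mm);
	if (ret)
		return ret;

	/* Only now is it safe to copy the shadow PDPs into the shadow
	 * ring context. */
	update_shadow_pdps(workload);

	/* ... oos-page sync and the remaining preparation steps ... */
	return 0;
}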
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/gvt/scheduler.c	28
1 file changed, 25 insertions(+), 3 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 9b92b4e25a20..1127bd77fc6e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -52,6 +52,29 @@ static void set_context_pdp_root_pointer(
 		pdp_pair[i].val = pdp[7 - i];
 }
 
+static void update_shadow_pdps(struct intel_vgpu_workload *workload)
+{
+	struct intel_vgpu *vgpu = workload->vgpu;
+	int ring_id = workload->ring_id;
+	struct i915_gem_context *shadow_ctx = vgpu->submission.shadow_ctx;
+	struct drm_i915_gem_object *ctx_obj =
+		shadow_ctx->engine[ring_id].state->obj;
+	struct execlist_ring_context *shadow_ring_context;
+	struct page *page;
+
+	if (WARN_ON(!workload->shadow_mm))
+		return;
+
+	if (WARN_ON(!atomic_read(&workload->shadow_mm->pincount)))
+		return;
+
+	page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
+	shadow_ring_context = kmap(page);
+	set_context_pdp_root_pointer(shadow_ring_context,
+			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
+	kunmap(page);
+}
+
 static int populate_shadow_context(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
@@ -112,9 +135,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 	}
 #undef COPY_REG
 
-	set_context_pdp_root_pointer(shadow_ring_context,
-			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
-
 	intel_gvt_hypervisor_read_gpa(vgpu,
 			workload->ring_context_gpa +
 			sizeof(*shadow_ring_context),
@@ -509,6 +529,8 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
 		return ret;
 	}
 
+	update_shadow_pdps(workload);
+
 	ret = intel_vgpu_sync_oos_pages(workload->vgpu);
 	if (ret) {
 		gvt_vgpu_err("fail to vgpu sync oos pages\n");
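
For reference, the helper both call sites use writes the four 64-bit PDP
root pointers into the ring context as eight 32-bit dwords in reverse
order, as the context line at the top of the first hunk shows. A minimal
sketch, with the execlist_ring_context member layout assumed for
illustration:

/* Sketch of the reverse-order PDP copy; the pdps[] member and the
 * execlist_mmio_pair type are assumed for illustration, and only the
 * pdp_pair[i].val = pdp[7 - i] line is visible in the diff above. */
static void set_context_pdp_root_pointer(
		struct execlist_ring_context *ring_context,
		u32 pdp[8])
{
	struct execlist_mmio_pair *pdp_pair = &ring_context->pdps[0];
	int i;

	/* The ring context stores PDP3 first while the shadow table
	 * stores PDP0 first, hence the 7 - i reversal. */
	for (i = 0; i < 8; i++)
		pdp_pair[i].val = pdp[7 - i];
}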