about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/gpu/drm/i915/gvt/render.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/i915/gvt/render.c')
-rw-r--r--  drivers/gpu/drm/i915/gvt/render.c | 14
1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 6d066cf35478..0672178548ef 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -147,6 +147,7 @@ static u32 gen9_render_mocs_L3[32];
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_vgpu_submission *s = &vgpu->submission;
 	enum forcewake_domains fw;
 	i915_reg_t reg;
 	u32 regs[] = {
@@ -160,7 +161,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
-	if (!test_and_clear_bit(ring_id, (void *)vgpu->tlb_handle_pending))
+	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
 		return;
 
 	reg = _MMIO(regs[ring_id]);
@@ -208,7 +209,7 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
 		gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
-		I915_WRITE(offset, vgpu_vreg(vgpu, offset));
+		I915_WRITE_FW(offset, vgpu_vreg(vgpu, offset));
 		offset.reg += 4;
 	}
 
@@ -261,14 +262,15 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct render_mmio *mmio;
-	u32 v;
-	int i, array_size;
-	u32 *reg_state = vgpu->shadow_ctx->engine[ring_id].lrc_reg_state;
+	struct intel_vgpu_submission *s = &vgpu->submission;
+	u32 *reg_state = s->shadow_ctx->engine[ring_id].lrc_reg_state;
 	u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
 	u32 inhibit_mask =
 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 	i915_reg_t last_reg = _MMIO(0);
+	struct render_mmio *mmio;
+	u32 v;
+	int i, array_size;
 
 	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
 	    || IS_KABYLAKE(vgpu->gvt->dev_priv)) {