author    Xiong Zhang <xiong.y.zhang@intel.com>    2017-11-06 16:23:02 -0500
committer Zhenyu Wang <zhenyuw@linux.intel.com>    2017-12-05 22:33:30 -0500
commit    29f9e425973d3c7cdfc32725a4d7c4c2f537d5a9
tree      58a0db4e16e4f139c9b6f38e8cd6cfab6167290a
parent    365ad5df9caa1a7ebc73b8d70ab94bbf6a74268a
drm/i915/gvt: Limit read hw reg to active vgpu
mmio_read_from_hw() lets a vgpu read hw regs. If the vgpu's workload is currently running on the hw, that is fine; otherwise the vgpu would read another vgpu's reg value, which is unsafe. This patch limits such hw access to the active vgpu. If a vgpu isn't running on the hw, a reg read returns the last active value, which was saved at schedule-out.

v2: the ring timestamp keeps advancing even when the ring is idle, so read it directly from hw. (Zhenyu)

Signed-off-by: Xiong Zhang <xiong.y.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
(cherry picked from commit 295764cd2ff41e2c1bc8af4050de77cec5e7a1c0)
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c   | 26
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  | 15
2 files changed, 37 insertions, 4 deletions
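
To make the gating logic easier to follow, here is a small stand-alone, user-space sketch of the idea behind the patch below. It is not i915 code: every name in it (toy_vgpu, toy_read_hw, gated_read, schedule_out, engine_owner as a plain array) is invented for illustration, and the ring-timestamp special case is left out for brevity. A ring-specific register is refreshed from hardware only while the requesting vgpu owns that ring; otherwise the value snapshotted at schedule-out is returned.

/*
 * Stand-alone model of the read gate this patch adds.  All names here are
 * invented for illustration; the real players are intel_vgpu,
 * gvt->scheduler.engine_owner[], mmio_read_from_hw() and
 * save_ring_hw_state().
 */
#include <stdio.h>
#include <stdint.h>

#define NUM_RINGS 4

struct toy_vgpu {
	int id;
	uint32_t vreg[64];	/* per-vgpu shadow copy of register values */
};

/* Which vgpu currently owns each ring; NULL means none/host. */
static struct toy_vgpu *engine_owner[NUM_RINGS];

/* Pretend hardware read. */
static uint32_t toy_read_hw(unsigned int reg)
{
	return 0xdead0000u | reg;
}

/* Return -1 if the register is not ring-specific, else its ring id. */
static int reg_to_ring_id(unsigned int reg)
{
	return reg < NUM_RINGS ? (int)reg : -1;
}

/*
 * Mirrors the patched mmio_read_from_hw(): refresh the shadow copy from
 * hardware only when the register is not ring-specific or the caller is
 * the ring's current owner; otherwise hand back the value saved at
 * schedule-out.
 */
static uint32_t gated_read(struct toy_vgpu *vgpu, unsigned int reg)
{
	int ring_id = reg_to_ring_id(reg);

	if (ring_id < 0 || engine_owner[ring_id] == vgpu)
		vgpu->vreg[reg] = toy_read_hw(reg);

	return vgpu->vreg[reg];
}

/* Mirrors save_ring_hw_state(): snapshot ring state when scheduled out. */
static void schedule_out(struct toy_vgpu *vgpu, int ring_id)
{
	vgpu->vreg[ring_id] = toy_read_hw(ring_id);
	engine_owner[ring_id] = NULL;
}

int main(void)
{
	struct toy_vgpu a = { .id = 1 }, b = { .id = 2 };

	engine_owner[0] = &a;		/* a is active on ring 0 */
	printf("a (owner) reads live HW : %#x\n", gated_read(&a, 0));
	printf("b (idle)  reads its copy: %#x\n", gated_read(&b, 0));

	schedule_out(&a, 0);		/* snapshot a, release ring 0 */
	printf("a (idle)  reads snapshot: %#x\n", gated_read(&a, 0));
	return 0;
}
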
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 55cbdb022924..1f840f6b81bb 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -1407,11 +1407,29 @@ static int skl_lcpll_write(struct intel_vgpu *vgpu, unsigned int offset,
 static int mmio_read_from_hw(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct drm_i915_private *dev_priv = gvt->dev_priv;
+	int ring_id;
+	u32 ring_base;
+
+	ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
+	/**
+	 * Read HW reg in following case
+	 * a. the offset isn't a ring mmio
+	 * b. the offset's ring is running on hw.
+	 * c. the offset is ring time stamp mmio
+	 */
+	if (ring_id >= 0)
+		ring_base = dev_priv->engine[ring_id]->mmio_base;
+
+	if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
+	    offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
+	    offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
+		mmio_hw_access_pre(dev_priv);
+		vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
+		mmio_hw_access_post(dev_priv);
+	}
 
-	mmio_hw_access_pre(dev_priv);
-	vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
-	mmio_hw_access_post(dev_priv);
 	return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
 }
 
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 3ac1dc97a7a0..a63787be1e6b 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -131,6 +131,20 @@ static inline bool is_gvt_request(struct drm_i915_gem_request *req)
 	return i915_gem_context_force_single_submission(req->ctx);
 }
 
+static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+{
+	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+	i915_reg_t reg;
+
+	reg = RING_INSTDONE(ring_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+	reg = RING_ACTHD(ring_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+	reg = RING_ACTHD_UDW(ring_base);
+	vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+}
+
 static int shadow_context_status_change(struct notifier_block *nb,
 		unsigned long action, void *data)
 {
@@ -176,6 +190,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
 		break;
 	case INTEL_CONTEXT_SCHEDULE_OUT:
 	case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
+		save_ring_hw_state(workload->vgpu, ring_id);
 		atomic_set(&workload->shadow_ctx_active, 0);
 		break;
 	default: