Diffstat (limited to 'drivers/gpu/drm/i915/gvt/mmio.c'):

 drivers/gpu/drm/i915/gvt/mmio.c | 32 ++++++++++++++++----------------
 1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 1e1310f50289..4ea0feb5f04d 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -117,18 +117,18 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		else
 			memcpy(pt, p_data, bytes);
 
-	} else if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
+	} else if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
 
 		/* Since we enter the failsafe mode early during guest boot,
 		 * guest may not have chance to set up its ppgtt table, so
 		 * there should not be any wp pages for guest. Keep the wp
 		 * related code here in case we need to handle it in furture.
 		 */
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
 			/* remove write protection to prevent furture traps */
-			intel_vgpu_clean_guest_page(vgpu, gp);
+			intel_vgpu_clean_page_track(vgpu, t);
 			if (read)
 				intel_gvt_hypervisor_read_gpa(vgpu, pa,
 						p_data, bytes);
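
For context, the failsafe branch above simply forgets the tracked page and then passes the access straight through to guest memory. A minimal, self-contained sketch of that pattern follows; the fixed-size table, the struct layout, and all sample_* names are illustrative assumptions, not the GVT code's actual definitions.

/* Simplified model of the failsafe path: if the faulting page is still
 * tracked, stop tracking it and pass the access straight through.
 * All types and helpers here are illustrative stand-ins, not GVT code.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT	12
#define MAX_TRACKED	8

struct sample_page_track {
	unsigned long gfn;	/* guest frame number being write-protected */
	bool active;
};

static struct sample_page_track tracked[MAX_TRACKED];
static uint8_t guest_mem[MAX_TRACKED << PAGE_SHIFT];	/* toy guest memory */

static struct sample_page_track *find_tracked_page(unsigned long gfn)
{
	for (int i = 0; i < MAX_TRACKED; i++)
		if (tracked[i].active && tracked[i].gfn == gfn)
			return &tracked[i];
	return NULL;
}

static void failsafe_rw(uint64_t pa, void *p_data, unsigned int bytes, bool read)
{
	struct sample_page_track *t = find_tracked_page(pa >> PAGE_SHIFT);

	if (t)
		t->active = false;	/* drop tracking so no further traps occur */

	if (read)
		memcpy(p_data, &guest_mem[pa], bytes);
	else
		memcpy(&guest_mem[pa], p_data, bytes);
}

int main(void)
{
	uint32_t val = 0xdeadbeef;

	tracked[0] = (struct sample_page_track){ .gfn = 0, .active = true };
	failsafe_rw(0x10, &val, sizeof(val), false);	/* write drops tracking */
	printf("tracked after failsafe write: %d\n", tracked[0].active);
	return 0;
}
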
@@ -170,17 +170,17 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		return ret;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
+	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
 
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
 			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
 					p_data, bytes);
 			if (ret) {
 				gvt_vgpu_err("guest page read error %d, "
 					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-						ret, gp->gfn, pa, *(u32 *)p_data,
+						ret, t->gfn, pa, *(u32 *)p_data,
 						bytes);
 			}
 			mutex_unlock(&gvt->lock);
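
The read path above keys its lookup on the guest frame number, i.e. the faulting guest physical address shifted right by PAGE_SHIFT. A small sketch of that lookup, assuming a plain linked list rather than whatever structure the GVT code actually uses:

/* Illustrative lookup of a tracked page by guest frame number (gfn).
 * The list-based store is an assumption for this sketch only.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT	12

struct sample_page_track {
	unsigned long gfn;
	struct sample_page_track *next;
};

static struct sample_page_track *track_list;

static struct sample_page_track *find_tracked_page(uint64_t pa)
{
	unsigned long gfn = pa >> PAGE_SHIFT;

	for (struct sample_page_track *t = track_list; t; t = t->next)
		if (t->gfn == gfn)
			return t;
	return NULL;
}

int main(void)
{
	struct sample_page_track t = { .gfn = 0x1234, .next = NULL };

	track_list = &t;

	/* any address inside guest page 0x1234 resolves to the same entry */
	printf("hit:  %p\n", (void *)find_tracked_page(0x1234000ULL + 0x7f8));
	printf("miss: %p\n", (void *)find_tracked_page(0x1235000ULL));
	return 0;
}
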
@@ -267,17 +267,17 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		return ret;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_write_protected_guest_page)) {
-		struct intel_vgpu_guest_page *gp;
+	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
 
-		gp = intel_vgpu_find_guest_page(vgpu, pa >> PAGE_SHIFT);
-		if (gp) {
-			ret = gp->handler(gp, pa, p_data, bytes);
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
+			ret = t->handler(t, pa, p_data, bytes);
 			if (ret) {
 				gvt_err("guest page write error %d, "
 					"gfn 0x%lx, pa 0x%llx, "
 					"var 0x%x, len %d\n",
-					ret, gp->gfn, pa,
+					ret, t->gfn, pa,
 					*(u32 *)p_data, bytes);
 			}
 			mutex_unlock(&gvt->lock);
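
On the write path, a hit dispatches through the per-page handler callback carried by the track structure (t->handler(t, pa, p_data, bytes) above), and a non-zero return is what feeds the gvt_err() message. A minimal sketch of that callback pattern, with a made-up handler standing in for the real emulation of the guest page-table write:

/* Minimal model of the per-page handler dispatch used on the write path.
 * The handler body is a placeholder that only logs; it is not the GVT
 * shadow page table code.
 */
#include <stdint.h>
#include <stdio.h>

struct sample_page_track;

typedef int (*track_handler_t)(struct sample_page_track *t, uint64_t pa,
			       void *p_data, unsigned int bytes);

struct sample_page_track {
	unsigned long gfn;
	track_handler_t handler;
};

static int sample_write_handler(struct sample_page_track *t, uint64_t pa,
				void *p_data, unsigned int bytes)
{
	printf("emulate write: gfn 0x%lx, pa 0x%llx, val 0x%x, len %u\n",
	       t->gfn, (unsigned long long)pa, *(uint32_t *)p_data, bytes);
	return 0;	/* non-zero would be reported as a guest page write error */
}

int main(void)
{
	struct sample_page_track t = {
		.gfn = 0x1234,
		.handler = sample_write_handler,
	};
	uint32_t val = 0xcafe;

	return t.handler(&t, 0x1234008ULL, &val, sizeof(val));
}
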