author		Pei Zhang <pei.zhang@intel.com>		2017-12-11 04:15:02 -0500
committer	Zhenyu Wang <zhenyuw@linux.intel.com>	2017-12-11 04:17:31 -0500
commit		eb3f05171c2e84f0114403df0fea942479fdaa3e (patch)
tree		2a8d60ccf48fd6a9ae2563e7f46bef6b2b6fe246 /drivers
parent		6ee942d5f7e3e630d3a2517e75969ce5d07c87d6 (diff)
drm/i915/gvt: refine function emulate_mmio_read/write
These two functions were written by multiple people across multiple patches. 'return' and 'goto err' are mixed in the same places, which makes the functions read as disordered. Unify on 'goto' so that the gvt lock is acquired in one place and released in one place.

Signed-off-by: Pei Zhang <pei.zhang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
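The cleanup is the classic single-exit locking pattern: take the lock once, funnel every failure through a shared 'err' label that logs and falls into 'out', and let every path, success or failure, release the lock at the one 'out' label. Below is a minimal user-space sketch of that shape, with a pthread mutex standing in for gvt->lock; emulate_read() and do_read() are hypothetical stand-ins for illustration, not functions from the i915/gvt code.

/*
 * Minimal user-space sketch of the single-exit locking pattern the
 * patch applies. The pthread mutex stands in for gvt->lock; all names
 * here are hypothetical, not taken from the i915/gvt sources.
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for the real register access. */
static int do_read(unsigned int offset, unsigned int *val)
{
	*val = offset * 2;
	return 0;
}

static int emulate_read(unsigned int offset, unsigned int *val)
{
	int ret = -EINVAL;

	pthread_mutex_lock(&lock);	/* lock acquired in one place */

	if (offset > 0x1000)
		goto err;		/* every failure funnels through err */

	ret = do_read(offset, val);
	if (ret)
		goto err;

	ret = 0;
	goto out;			/* success joins the common exit */
err:
	fprintf(stderr, "fail to emulate read %08x\n", offset);
out:
	pthread_mutex_unlock(&lock);	/* lock released in one place */
	return ret;
}

int main(void)
{
	unsigned int val = 0;

	if (!emulate_read(0x10, &val))
		printf("read ok: %u\n", val);
	return 0;
}

The payoff, as in the patch itself, is that the unlock appears exactly once, so adding a new early exit later cannot silently leak the lock.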
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/i915/gvt/mmio.c	36
1 file changed, 15 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 4ea0feb5f04d..f7227a3ad469 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -157,7 +157,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 	unsigned int offset = 0;
 	int ret = -EINVAL;
 
-
 	if (vgpu->failsafe) {
 		failsafe_emulate_mmio_rw(vgpu, pa, p_data, bytes, true);
 		return 0;
@@ -166,8 +165,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 
 	if (vgpu_gpa_is_aperture(vgpu, pa)) {
 		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, true);
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
@@ -183,8 +181,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 					ret, t->gfn, pa, *(u32 *)p_data,
 					bytes);
 			}
-			mutex_unlock(&gvt->lock);
-			return ret;
+			goto out;
 		}
 	}
 
@@ -205,14 +202,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 				p_data, bytes);
 		if (ret)
 			goto err;
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
 		ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
@@ -228,11 +223,13 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		goto err;
 
 	intel_gvt_mmio_set_accessed(gvt, offset);
-	mutex_unlock(&gvt->lock);
-	return 0;
+	ret = 0;
+	goto out;
+
 err:
 	gvt_vgpu_err("fail to emulate MMIO read %08x len %d\n",
 			offset, bytes);
+out:
 	mutex_unlock(&gvt->lock);
 	return ret;
 }
@@ -263,8 +260,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 
 	if (vgpu_gpa_is_aperture(vgpu, pa)) {
 		ret = vgpu_aperture_rw(vgpu, pa, p_data, bytes, false);
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
@@ -280,8 +276,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 					ret, t->gfn, pa,
 					*(u32 *)p_data, bytes);
 			}
-			mutex_unlock(&gvt->lock);
-			return ret;
+			goto out;
 		}
 	}
 
@@ -302,14 +297,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 				p_data, bytes);
 		if (ret)
 			goto err;
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
 		ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
-		mutex_unlock(&gvt->lock);
-		return ret;
+		goto out;
 	}
 
 	ret = intel_vgpu_mmio_reg_rw(vgpu, offset, p_data, bytes, false);
@@ -317,11 +310,12 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		goto err;
 
 	intel_gvt_mmio_set_accessed(gvt, offset);
-	mutex_unlock(&gvt->lock);
-	return 0;
+	ret = 0;
+	goto out;
err:
 	gvt_vgpu_err("fail to emulate MMIO write %08x len %d\n", offset,
 			bytes);
+out:
 	mutex_unlock(&gvt->lock);
 	return ret;
 }