aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChangbin Du <changbin.du@intel.com>2017-12-08 01:56:21 -0500
committerZhenyu Wang <zhenyuw@linux.intel.com>2017-12-08 03:18:19 -0500
commit83164886e4559f87015a33780852a64cdd6e4e50 (patch)
tree37924557b861d87a4e250927b917c297af01a3ef
parent4447f423ff0822f3eacc31bbaf445226fa312b84 (diff)
drm/i915/gvt: Select appropriate mmio list at initialization time
Select appropriate mmio list at initialization time, so we don't need to do duplicated work where the mmio list is required.

V2:
- Add a termination mark to the mmio list.

Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c2
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/render.c60
-rw-r--r--drivers/gpu/drm/i915/gvt/render.h9
4 files changed, 40 insertions, 33 deletions
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 9a5dce3aa10a..643bb961d40d 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -386,6 +386,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
386 if (ret) 386 if (ret)
387 goto out_clean_idr; 387 goto out_clean_idr;
388 388
389 intel_gvt_init_engine_mmio_context(gvt);
390
389 ret = intel_gvt_load_firmware(gvt); 391 ret = intel_gvt_load_firmware(gvt);
390 if (ret) 392 if (ret)
391 goto out_clean_mmio_info; 393 goto out_clean_mmio_info;
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 77df9bad5dea..39c2f3a4588e 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -310,6 +310,8 @@ struct intel_gvt {
310 wait_queue_head_t service_thread_wq; 310 wait_queue_head_t service_thread_wq;
311 unsigned long service_request; 311 unsigned long service_request;
312 312
313 struct engine_mmio *engine_mmio_list;
314
313 struct dentry *debugfs_root; 315 struct dentry *debugfs_root;
314}; 316};
315 317
diff --git a/drivers/gpu/drm/i915/gvt/render.c b/drivers/gpu/drm/i915/gvt/render.c
index 43abca5dbe75..3e675f81815f 100644
--- a/drivers/gpu/drm/i915/gvt/render.c
+++ b/drivers/gpu/drm/i915/gvt/render.c
@@ -37,14 +37,6 @@
37#include "gvt.h" 37#include "gvt.h"
38#include "trace.h" 38#include "trace.h"
39 39
40struct render_mmio {
41 int ring_id;
42 i915_reg_t reg;
43 u32 mask;
44 bool in_context;
45 u32 value;
46};
47
48/** 40/**
49 * Defined in Intel Open Source PRM. 41 * Defined in Intel Open Source PRM.
50 * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms 42 * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
@@ -59,7 +51,7 @@ struct render_mmio {
59#define VF_GUARDBAND _MMIO(0x83a4) 51#define VF_GUARDBAND _MMIO(0x83a4)
60 52
61/* Raw offset is appended to each line for convenience. */ 53/* Raw offset is appended to each line for convenience. */
62static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = { 54static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
63 {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ 55 {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
64 {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ 56 {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
65 {RCS, HWSTAM, 0x0, false}, /* 0x2098 */ 57 {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
@@ -88,9 +80,10 @@ static struct render_mmio gen8_render_mmio_list[] __cacheline_aligned = {
88 {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */ 80 {BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
89 {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */ 81 {BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
90 {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */ 82 {BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
83 { /* Terminated */ }
91}; 84};
92 85
93static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = { 86static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
94 {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ 87 {RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
95 {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ 88 {RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
96 {RCS, HWSTAM, 0x0, false}, /* 0x2098 */ 89 {RCS, HWSTAM, 0x0, false}, /* 0x2098 */
@@ -153,6 +146,7 @@ static struct render_mmio gen9_render_mmio_list[] __cacheline_aligned = {
153 {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */ 146 {RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
154 {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */ 147 {RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
155 {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */ 148 {RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
149 { /* Terminated */ }
156}; 150};
157 151
158static u32 gen9_render_mocs[I915_NUM_ENGINES][64]; 152static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
@@ -282,21 +276,14 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
282 u32 inhibit_mask = 276 u32 inhibit_mask =
283 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT); 277 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
284 i915_reg_t last_reg = _MMIO(0); 278 i915_reg_t last_reg = _MMIO(0);
285 struct render_mmio *mmio; 279 struct engine_mmio *mmio;
286 u32 v; 280 u32 v;
287 int i, array_size;
288 281
289 if (IS_SKYLAKE(vgpu->gvt->dev_priv) 282 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
290 || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
291 mmio = gen9_render_mmio_list;
292 array_size = ARRAY_SIZE(gen9_render_mmio_list);
293 load_mocs(vgpu, ring_id); 283 load_mocs(vgpu, ring_id);
294 } else {
295 mmio = gen8_render_mmio_list;
296 array_size = ARRAY_SIZE(gen8_render_mmio_list);
297 }
298 284
299 for (i = 0; i < array_size; i++, mmio++) { 285 mmio = vgpu->gvt->engine_mmio_list;
286 while (i915_mmio_reg_offset((mmio++)->reg)) {
300 if (mmio->ring_id != ring_id) 287 if (mmio->ring_id != ring_id)
301 continue; 288 continue;
302 289
@@ -326,7 +313,7 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
326 } 313 }
327 314
328 /* Make sure the switched MMIOs have taken effect. */ 315 /* Make sure the switched MMIOs have taken effect. */
329 if (likely(INTEL_GVT_MMIO_OFFSET(last_reg))) 316 if (likely(i915_mmio_reg_offset(last_reg)))
330 I915_READ_FW(last_reg); 317 I915_READ_FW(last_reg);
331 318
332 handle_tlb_pending_event(vgpu, ring_id); 319 handle_tlb_pending_event(vgpu, ring_id);
@@ -336,21 +323,15 @@ static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
336static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id) 323static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
337{ 324{
338 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv; 325 struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
339 struct render_mmio *mmio;
340 i915_reg_t last_reg = _MMIO(0); 326 i915_reg_t last_reg = _MMIO(0);
327 struct engine_mmio *mmio;
341 u32 v; 328 u32 v;
342 int i, array_size;
343 329
344 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) { 330 if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
345 mmio = gen9_render_mmio_list;
346 array_size = ARRAY_SIZE(gen9_render_mmio_list);
347 restore_mocs(vgpu, ring_id); 331 restore_mocs(vgpu, ring_id);
348 } else {
349 mmio = gen8_render_mmio_list;
350 array_size = ARRAY_SIZE(gen8_render_mmio_list);
351 }
352 332
353 for (i = 0; i < array_size; i++, mmio++) { 333 mmio = vgpu->gvt->engine_mmio_list;
334 while (i915_mmio_reg_offset((mmio++)->reg)) {
354 if (mmio->ring_id != ring_id) 335 if (mmio->ring_id != ring_id)
355 continue; 336 continue;
356 337
@@ -374,7 +355,7 @@ static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
374 } 355 }
375 356
376 /* Make sure the switched MMIOs have taken effect. */ 357 /* Make sure the switched MMIOs have taken effect. */
377 if (likely(INTEL_GVT_MMIO_OFFSET(last_reg))) 358 if (likely(i915_mmio_reg_offset(last_reg)))
378 I915_READ_FW(last_reg); 359 I915_READ_FW(last_reg);
379} 360}
380 361
@@ -419,3 +400,16 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
419 400
420 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); 401 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
421} 402}
403
404/**
405 * intel_gvt_init_engine_mmio_context - Initialize the engine mmio list
406 * @gvt: GVT device
407 *
408 */
409void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
410{
411 if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
412 gvt->engine_mmio_list = gen9_engine_mmio_list;
413 else
414 gvt->engine_mmio_list = gen8_engine_mmio_list;
415}
diff --git a/drivers/gpu/drm/i915/gvt/render.h b/drivers/gpu/drm/i915/gvt/render.h
index 91db1d39d28f..ca2c6a745673 100644
--- a/drivers/gpu/drm/i915/gvt/render.h
+++ b/drivers/gpu/drm/i915/gvt/render.h
@@ -36,8 +36,17 @@
36#ifndef __GVT_RENDER_H__ 36#ifndef __GVT_RENDER_H__
37#define __GVT_RENDER_H__ 37#define __GVT_RENDER_H__
38 38
39struct engine_mmio {
40 int ring_id;
41 i915_reg_t reg;
42 u32 mask;
43 bool in_context;
44 u32 value;
45};
46
39void intel_gvt_switch_mmio(struct intel_vgpu *pre, 47void intel_gvt_switch_mmio(struct intel_vgpu *pre,
40 struct intel_vgpu *next, int ring_id); 48 struct intel_vgpu *next, int ring_id);
41 49
50void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
42 51
43#endif 52#endif