author		Dave Airlie <airlied@redhat.com>	2017-12-03 18:40:35 -0500
committer	Dave Airlie <airlied@redhat.com>	2017-12-03 19:56:53 -0500
commit		ca797d29cd63e7b71b4eea29aff3b1cefd1ecb59 (patch)
tree		db1ada69f713da68b43c828bd15f90e250f86ab7 /drivers/gpu/drm/i915/intel_engine_cs.c
parent		2c1c55cb75a9c72f9726fabb8c3607947711a8df (diff)
parent		010d118c20617021025a930bc8e90f371ab99da5 (diff)
Merge tag 'drm-intel-next-2017-11-17-1' of git://anongit.freedesktop.org/drm/drm-intel into drm-next
More change sets for 4.16:
- Many improvements for selftests and other igt tests (Chris)
- Forcewake with PUNIT->PMIC bus fixes and robustness (Hans)
- Define an engine class for uABI (Tvrtko)
- Context switch fixes and improvements (Chris)
- GT powersavings and power gating simplification and fixes (Chris)
- Other general driver clean-ups (Chris, Lucas, Ville)
- Removing old, useless and/or bad workarounds (Chris, Oscar, Radhakrishna)
- IPS, pipe config, etc in preparation for another Fast Boot attempt (Maarten)
- OA perf fixes and support to Coffee Lake and Cannonlake (Lionel)
- Fixes around GPU fault registers (Michel)
- GEM Proxy (Tina)
- Refactor of Geminilake and Cannonlake plane color handling (James)
- Generalize transcoder loop (Mika Kahola)
- New HW Workaround for Cannonlake and Geminilake (Rodrigo)
- Resume GuC before using GEM (Chris)
- Stolen Memory handling improvements (Ville)
- Initialize entry in PPAT for older compilers (Chris)
- Other fixes and robustness improvements on execbuf (Chris)
- Improve logs of GEM_BUG_ON (Mika Kuoppala)
- Rework with massive rename of GuC functions and files (Sagar)
- Don't sanitize frame start delay if pipe is off (Ville)
- Cannonlake clock fixes (Rodrigo)
- Cannonlake HDMI 2.0 support (Rodrigo)
- Add a GuC doorbells selftest (Michel)
- Add might_sleep() check to our wait_for() (Chris)
Many GVT changes for 4.16:
- CSB HWSP update support (Weinan)
- GVT debug helpers, dyndbg and debugfs (Chuanxiao, Shuo)
- full virtualized opregion (Xiaolin)
- VM health check for sane fallback (Fred)
- workload submission code refactor for future enabling (Zhi)
- Updated repo URL in MAINTAINERS (Zhenyu)
- many other misc fixes
* tag 'drm-intel-next-2017-11-17-1' of git://anongit.freedesktop.org/drm/drm-intel: (260 commits)
drm/i915: Update DRIVER_DATE to 20171117
drm/i915: Add a policy note for removing workarounds
drm/i915/selftests: Report ENOMEM clearly for an allocation failure
Revert "drm/i915: Display WA #1133 WaFbcSkipSegments:cnl, glk"
drm/i915: Calculate g4x intermediate watermarks correctly
drm/i915: Calculate vlv/chv intermediate watermarks correctly, v3.
drm/i915: Pass crtc_state to ips toggle functions, v2
drm/i915: Pass idle crtc_state to intel_dp_sink_crc
drm/i915: Enable FIFO underrun reporting after initial fastset, v4.
drm/i915: Mark the userptr invalidate workqueue as WQ_MEM_RECLAIM
drm/i915: Add might_sleep() check to wait_for()
drm/i915/selftests: Add a GuC doorbells selftest
drm/i915/cnl: Extend HDMI 2.0 support to CNL.
drm/i915/cnl: Simplify dco_fraction calculation.
drm/i915/cnl: Don't blindly replace qdiv.
drm/i915/cnl: Fix wrpll math for higher freqs.
drm/i915/cnl: Fix, simplify and unify wrpll variable sizes.
drm/i915/cnl: Remove useless conversion.
drm/i915/cnl: Remove spurious central_freq.
drm/i915/selftests: exercise_ggtt may have nothing to do
...
Diffstat (limited to 'drivers/gpu/drm/i915/intel_engine_cs.c')
-rw-r--r--	drivers/gpu/drm/i915/intel_engine_cs.c	215
1 file changed, 127 insertions, 88 deletions
diff --git a/drivers/gpu/drm/i915/intel_engine_cs.c b/drivers/gpu/drm/i915/intel_engine_cs.c
index ab5bf4e2e28e..9897c7f78c51 100644
--- a/drivers/gpu/drm/i915/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/intel_engine_cs.c
@@ -50,6 +50,8 @@ struct engine_class_info {
         const char *name;
         int (*init_legacy)(struct intel_engine_cs *engine);
         int (*init_execlists)(struct intel_engine_cs *engine);
+
+        u8 uabi_class;
 };
 
 static const struct engine_class_info intel_engine_classes[] = {
@@ -57,21 +59,25 @@ static const struct engine_class_info intel_engine_classes[] = {
                 .name = "rcs",
                 .init_execlists = logical_render_ring_init,
                 .init_legacy = intel_init_render_ring_buffer,
+                .uabi_class = I915_ENGINE_CLASS_RENDER,
         },
         [COPY_ENGINE_CLASS] = {
                 .name = "bcs",
                 .init_execlists = logical_xcs_ring_init,
                 .init_legacy = intel_init_blt_ring_buffer,
+                .uabi_class = I915_ENGINE_CLASS_COPY,
         },
         [VIDEO_DECODE_CLASS] = {
                 .name = "vcs",
                 .init_execlists = logical_xcs_ring_init,
                 .init_legacy = intel_init_bsd_ring_buffer,
+                .uabi_class = I915_ENGINE_CLASS_VIDEO,
         },
         [VIDEO_ENHANCEMENT_CLASS] = {
                 .name = "vecs",
                 .init_execlists = logical_xcs_ring_init,
                 .init_legacy = intel_init_vebox_ring_buffer,
+                .uabi_class = I915_ENGINE_CLASS_VIDEO_ENHANCE,
         },
 };
 
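The I915_ENGINE_CLASS_* values referenced above belong to the i915 uAPI rather than to this file. As a point of reference, the engine-class enum exposed to userspace is assumed to look roughly like the sketch below (based on include/uapi/drm/i915_drm.h from this era; treat the exact values as the uABI contract, not something defined in this diff):

/* Sketch of the assumed uAPI engine classes (include/uapi/drm/i915_drm.h). */
enum drm_i915_gem_engine_class {
        I915_ENGINE_CLASS_RENDER        = 0,
        I915_ENGINE_CLASS_COPY          = 1,
        I915_ENGINE_CLASS_VIDEO         = 2,
        I915_ENGINE_CLASS_VIDEO_ENHANCE = 3,

        I915_ENGINE_CLASS_INVALID       = -1
};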
@@ -213,13 +219,15 @@ intel_engine_setup(struct drm_i915_private *dev_priv,
         WARN_ON(snprintf(engine->name, sizeof(engine->name), "%s%u",
                          class_info->name, info->instance) >=
                 sizeof(engine->name));
-        engine->uabi_id = info->uabi_id;
         engine->hw_id = engine->guc_id = info->hw_id;
         engine->mmio_base = info->mmio_base;
         engine->irq_shift = info->irq_shift;
         engine->class = info->class;
         engine->instance = info->instance;
 
+        engine->uabi_id = info->uabi_id;
+        engine->uabi_class = class_info->uabi_class;
+
         engine->context_size = __intel_engine_context_size(dev_priv,
                                                            engine->class);
         if (WARN_ON(engine->context_size > BIT(20)))
@@ -281,6 +289,8 @@ int intel_engines_init_mmio(struct drm_i915_private *dev_priv)
 
         device_info->num_rings = hweight32(mask);
 
+        i915_check_and_clear_faults(dev_priv);
+
         return 0;
 
 cleanup:
@@ -620,7 +630,7 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
          * Similarly the preempt context must always be available so that
          * we can interrupt the engine at any time.
          */
-        if (INTEL_INFO(engine->i915)->has_logical_ring_preemption) {
+        if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
                 ring = engine->context_pin(engine,
                                            engine->i915->preempt_context);
                 if (IS_ERR(ring)) {
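HAS_LOGICAL_RING_PREEMPTION() replaces the open-coded device-info lookup used before. A plausible sketch of such a predicate macro in i915_drv.h is shown below; the exact definition is an assumption that simply mirrors the flag it wraps:

/* Assumed shape of the predicate macro; it presumably just wraps the
 * per-device capability flag that the old code read directly. */
#define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
        ((dev_priv)->info.has_logical_ring_preemption)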
@@ -633,25 +643,19 @@ int intel_engine_init_common(struct intel_engine_cs *engine)
         if (ret)
                 goto err_unpin_preempt;
 
-        ret = i915_gem_render_state_init(engine);
-        if (ret)
-                goto err_breadcrumbs;
-
         if (HWS_NEEDS_PHYSICAL(engine->i915))
                 ret = init_phys_status_page(engine);
         else
                 ret = init_status_page(engine);
         if (ret)
-                goto err_rs_fini;
+                goto err_breadcrumbs;
 
         return 0;
 
-err_rs_fini:
-        i915_gem_render_state_fini(engine);
 err_breadcrumbs:
         intel_engine_fini_breadcrumbs(engine);
 err_unpin_preempt:
-        if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+        if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
                 engine->context_unpin(engine, engine->i915->preempt_context);
 err_unpin_kernel:
         engine->context_unpin(engine, engine->i915->kernel_context);
@@ -674,12 +678,14 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
         else
                 cleanup_status_page(engine);
 
-        i915_gem_render_state_fini(engine);
         intel_engine_fini_breadcrumbs(engine);
         intel_engine_cleanup_cmd_parser(engine);
         i915_gem_batch_pool_fini(&engine->batch_pool);
 
-        if (INTEL_INFO(engine->i915)->has_logical_ring_preemption)
+        if (engine->default_state)
+                i915_gem_object_put(engine->default_state);
+
+        if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
                 engine->context_unpin(engine, engine->i915->preempt_context);
         engine->context_unpin(engine, engine->i915->kernel_context);
 }
@@ -1014,22 +1020,6 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
         WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                           GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
 
-        /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
-        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
-                WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
-                                  GEN9_DG_MIRROR_FIX_ENABLE);
-
-        /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
-        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
-                WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
-                                  GEN9_RHWO_OPTIMIZATION_DISABLE);
-                /*
-                 * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
-                 * but we do that in per ctx batchbuffer as there is an issue
-                 * with this register not getting restored on ctx restore
-                 */
-        }
-
         /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
         /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
         WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
@@ -1045,11 +1035,6 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
         WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
                           GEN9_CCS_TLB_PREFETCH_ENABLE);
 
-        /* WaDisableMaskBasedCammingInRCC:bxt */
-        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
-                WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
-                                  PIXEL_MASK_CAMMING_DISABLE);
-
         /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
         WA_SET_BIT_MASKED(HDC_CHICKEN0,
                           HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
@@ -1079,8 +1064,7 @@ static int gen9_init_workarounds(struct intel_engine_cs *engine)
         /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
         if (IS_SKYLAKE(dev_priv) ||
             IS_KABYLAKE(dev_priv) ||
-            IS_COFFEELAKE(dev_priv) ||
-            IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
+            IS_COFFEELAKE(dev_priv))
                 WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
                                   GEN8_SAMPLER_POWER_BYPASS_DIS);
 
@@ -1204,72 +1188,35 @@ static int skl_init_workarounds(struct intel_engine_cs *engine)
 static int bxt_init_workarounds(struct intel_engine_cs *engine)
 {
         struct drm_i915_private *dev_priv = engine->i915;
+        u32 val;
         int ret;
 
         ret = gen9_init_workarounds(engine);
         if (ret)
                 return ret;
 
-        /* WaStoreMultiplePTEenable:bxt */
-        /* This is a requirement according to Hardware specification */
-        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
-                I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
-
-        /* WaSetClckGatingDisableMedia:bxt */
-        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
-                I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
-                                            ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
-        }
-
         /* WaDisableThreadStallDopClockGating:bxt */
         WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
                           STALL_DOP_GATING_DISABLE);
 
         /* WaDisablePooledEuLoadBalancingFix:bxt */
-        if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
-                I915_WRITE(FF_SLICE_CS_CHICKEN2,
-                           _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
-        }
-
-        /* WaDisableSbeCacheDispatchPortSharing:bxt */
-        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
-                WA_SET_BIT_MASKED(
-                        GEN7_HALF_SLICE_CHICKEN1,
-                        GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-        }
-
-        /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
-        /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
-        /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
-        /* WaDisableLSQCROPERFforOCL:bxt */
-        if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
-                ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
-                if (ret)
-                        return ret;
-
-                ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
-                if (ret)
-                        return ret;
-        }
+        I915_WRITE(FF_SLICE_CS_CHICKEN2,
+                   _MASKED_BIT_ENABLE(GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE));
 
         /* WaProgramL3SqcReg1DefaultForPerf:bxt */
-        if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
-                u32 val = I915_READ(GEN8_L3SQCREG1);
-                val &= ~L3_PRIO_CREDITS_MASK;
-                val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
-                I915_WRITE(GEN8_L3SQCREG1, val);
-        }
+        val = I915_READ(GEN8_L3SQCREG1);
+        val &= ~L3_PRIO_CREDITS_MASK;
+        val |= L3_GENERAL_PRIO_CREDITS(62) | L3_HIGH_PRIO_CREDITS(2);
+        I915_WRITE(GEN8_L3SQCREG1, val);
 
         /* WaToEnableHwFixForPushConstHWBug:bxt */
-        if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
-                WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
-                                  GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
+        WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
+                          GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
 
         /* WaInPlaceDecompressionHang:bxt */
-        if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
-                I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
-                           (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
-                            GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
+        I915_WRITE(GEN9_GAMT_ECO_REG_RW_IA,
+                   (I915_READ(GEN9_GAMT_ECO_REG_RW_IA) |
+                    GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS));
 
         return 0;
 }
@@ -1585,6 +1532,34 @@ bool intel_engines_are_idle(struct drm_i915_private *dev_priv)
         return true;
 }
 
+/**
+ * intel_engine_has_kernel_context:
+ * @engine: the engine
+ *
+ * Returns true if the last context to be executed on this engine, or has been
+ * executed if the engine is already idle, is the kernel context
+ * (#i915.kernel_context).
+ */
+bool intel_engine_has_kernel_context(const struct intel_engine_cs *engine)
+{
+        const struct i915_gem_context * const kernel_context =
+                engine->i915->kernel_context;
+        struct drm_i915_gem_request *rq;
+
+        lockdep_assert_held(&engine->i915->drm.struct_mutex);
+
+        /*
+         * Check the last context seen by the engine. If active, it will be
+         * the last request that remains in the timeline. When idle, it is
+         * the last executed context as tracked by retirement.
+         */
+        rq = __i915_gem_active_peek(&engine->timeline->last_request);
+        if (rq)
+                return rq->ctx == kernel_context;
+        else
+                return engine->last_retired_context == kernel_context;
+}
+
 void intel_engines_reset_default_submission(struct drm_i915_private *i915)
 {
         struct intel_engine_cs *engine;
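As the lockdep assertion above indicates, callers of the new helper must already hold struct_mutex. A hedged sketch of how a caller might combine it with the existing idleness check (the wrapper name below is hypothetical, not part of this diff):

/* Hypothetical caller: only trust saved engine state once the engine is
 * idle and its last executed context was the kernel context. */
static bool engine_is_parked_on_kernel_ctx(struct intel_engine_cs *engine)
{
        lockdep_assert_held(&engine->i915->drm.struct_mutex);

        return intel_engine_is_idle(engine) &&
               intel_engine_has_kernel_context(engine);
}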
@@ -1594,19 +1569,63 @@ void intel_engines_reset_default_submission(struct drm_i915_private *i915)
                 engine->set_default_submission(engine);
 }
 
-void intel_engines_mark_idle(struct drm_i915_private *i915)
+/**
+ * intel_engines_park: called when the GT is transitioning from busy->idle
+ * @i915: the i915 device
+ *
+ * The GT is now idle and about to go to sleep (maybe never to wake again?).
+ * Time for us to tidy and put away our toys (release resources back to the
+ * system).
+ */
+void intel_engines_park(struct drm_i915_private *i915)
 {
         struct intel_engine_cs *engine;
         enum intel_engine_id id;
 
         for_each_engine(engine, i915, id) {
+                /* Flush the residual irq tasklets first. */
                 intel_engine_disarm_breadcrumbs(engine);
+                tasklet_kill(&engine->execlists.tasklet);
+
+                /*
+                 * We are committed now to parking the engines, make sure there
+                 * will be no more interrupts arriving later and the engines
+                 * are truly idle.
+                 */
+                if (wait_for(intel_engine_is_idle(engine), 10)) {
+                        struct drm_printer p = drm_debug_printer(__func__);
+
+                        dev_err(i915->drm.dev,
+                                "%s is not idle before parking\n",
+                                engine->name);
+                        intel_engine_dump(engine, &p);
+                }
+
+                if (engine->park)
+                        engine->park(engine);
+
                 i915_gem_batch_pool_fini(&engine->batch_pool);
-                tasklet_kill(&engine->execlists.irq_tasklet);
                 engine->execlists.no_priolist = false;
         }
 }
 
+/**
+ * intel_engines_unpark: called when the GT is transitioning from idle->busy
+ * @i915: the i915 device
+ *
+ * The GT was idle and now about to fire up with some new user requests.
+ */
+void intel_engines_unpark(struct drm_i915_private *i915)
+{
+        struct intel_engine_cs *engine;
+        enum intel_engine_id id;
+
+        for_each_engine(engine, i915, id) {
+                if (engine->unpark)
+                        engine->unpark(engine);
+        }
+}
+
 bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
 {
         switch (INTEL_GEN(engine->i915)) {
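The kernel-doc above describes park/unpark as the busy->idle and idle->busy transition hooks. A hedged sketch of the driver glue expected to call them (the wrapper function names are hypothetical; in the real driver the calls sit in the GEM idle/busy handling, which is not part of this file):

/* Hypothetical glue: park from the GT idle worker once all engines are
 * quiesced, unpark when new user work arrives. Assumes the callers hold
 * struct_mutex, as the rest of this path does. */
static void hypothetical_gt_idle_worker(struct drm_i915_private *i915)
{
        if (intel_engines_are_idle(i915))
                intel_engines_park(i915);       /* kill tasklets, drop batch pools */
}

static void hypothetical_gt_mark_busy(struct drm_i915_private *i915)
{
        intel_engines_unpark(i915);             /* let engines reacquire resources */
}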
@@ -1622,6 +1641,20 @@ bool intel_engine_can_store_dword(struct intel_engine_cs *engine)
         }
 }
 
+unsigned int intel_engines_has_context_isolation(struct drm_i915_private *i915)
+{
+        struct intel_engine_cs *engine;
+        enum intel_engine_id id;
+        unsigned int which;
+
+        which = 0;
+        for_each_engine(engine, i915, id)
+                if (engine->default_state)
+                        which |= BIT(engine->uabi_class);
+
+        return which;
+}
+
 static void print_request(struct drm_printer *m,
                           struct drm_i915_gem_request *rq,
                           const char *prefix)
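The returned mask has one bit per uABI engine class, set when that class has a saved default context image (engine->default_state). A hedged sketch of how the mask could be interrogated (the helper below is illustrative only and not part of this diff):

/* Sketch: test whether a given uABI engine class has context isolation,
 * i.e. whether its default HW state was captured. */
static bool class_has_context_isolation(struct drm_i915_private *i915,
                                        unsigned int uabi_class)
{
        return intel_engines_has_context_isolation(i915) & BIT(uabi_class);
}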
@@ -1688,9 +1721,14 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
         drm_printf(m, "\tRING_TAIL: 0x%08x [0x%08x]\n",
                    I915_READ(RING_TAIL(engine->mmio_base)) & TAIL_ADDR,
                    rq ? rq->ring->tail : 0);
-        drm_printf(m, "\tRING_CTL: 0x%08x [%s]\n",
+        drm_printf(m, "\tRING_CTL: 0x%08x%s\n",
                    I915_READ(RING_CTL(engine->mmio_base)),
-                   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? "waiting" : "");
+                   I915_READ(RING_CTL(engine->mmio_base)) & (RING_WAIT | RING_WAIT_SEMAPHORE) ? " [waiting]" : "");
+        if (INTEL_GEN(engine->i915) > 2) {
+                drm_printf(m, "\tRING_MODE: 0x%08x%s\n",
+                           I915_READ(RING_MI_MODE(engine->mmio_base)),
+                           I915_READ(RING_MI_MODE(engine->mmio_base)) & (MODE_IDLE) ? " [idle]" : "");
+        }
 
         rcu_read_unlock();
 
@@ -1781,6 +1819,7 @@ void intel_engine_dump(struct intel_engine_cs *engine, struct drm_printer *m)
         }
         spin_unlock_irq(&b->rb_lock);
 
+        drm_printf(m, "Idle? %s\n", yesno(intel_engine_is_idle(engine)));
         drm_printf(m, "\n");
 }
 