diff options
author | Mahesh Kumar <mahesh1.kumar@intel.com> | 2018-04-26 10:25:16 -0400 |
---|---|---|
committer | Rodrigo Vivi <rodrigo.vivi@intel.com> | 2018-04-27 20:11:51 -0400 |
commit | aa9664ffe863f470efdbe40ea20ce96f2887ebcd (patch) | |
tree | e903e51518892208579adadee50c0008aacfa34f | |
parent | 74bd8004e475d67eb41f6795cda5efac03d010b8 (diff) |
drm/i915/icl: Enable 2nd DBuf slice only when needed
ICL has two slices of DBuf, each slice of size 1024 blocks.
We should not always enable slice-2. It should be enabled only if the
display's total required bandwidth is > 12 GBps OR more than one pipe is enabled.
Changes since V1:
- typecast total_data_rate to u64 before multiplication to solve any
possible overflow (Rodrigo)
- fix where skl_wm_get_hw_state was memsetting ddb, resulting in
enabled_slices becoming zero
- Fix the logic of calculating ddb_size
Changes since V2:
- If no CRTC is part of the commit, required_slices will have the value "0";
don't try to disable the DBuf slice.
Changes since V3:
- Create a generic helper to enable/disable slice
- don't return early if total_data_rate is 0; it may be a cursor-only
commit, or an atomic modeset without any plane.
Changes since V4:
- Solve checkpatch warnings
- use kernel types u8/u64 instead of uint8_t/uint64_t
Changes since V5:
- Rebase
Signed-off-by: Mahesh Kumar <mahesh1.kumar@intel.com>
Reviewed-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180426142517.16643-3-mahesh1.kumar@intel.com
-rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 10 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_drv.h | 6 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_pm.c | 57 | ||||
-rw-r--r-- | drivers/gpu/drm/i915/intel_runtime_pm.c | 65 |
4 files changed, 113 insertions, 25 deletions
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 338570e61a1f..84ce66be88f2 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -12258,6 +12258,8 @@ static void skl_update_crtcs(struct drm_atomic_state *state) | |||
12258 | bool progress; | 12258 | bool progress; |
12259 | enum pipe pipe; | 12259 | enum pipe pipe; |
12260 | int i; | 12260 | int i; |
12261 | u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; | ||
12262 | u8 required_slices = intel_state->wm_results.ddb.enabled_slices; | ||
12261 | 12263 | ||
12262 | const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {}; | 12264 | const struct skl_ddb_entry *entries[I915_MAX_PIPES] = {}; |
12263 | 12265 | ||
@@ -12266,6 +12268,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state) | |||
12266 | if (new_crtc_state->active) | 12268 | if (new_crtc_state->active) |
12267 | entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; | 12269 | entries[i] = &to_intel_crtc_state(old_crtc_state)->wm.skl.ddb; |
12268 | 12270 | ||
12271 | /* If 2nd DBuf slice required, enable it here */ | ||
12272 | if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices) | ||
12273 | icl_dbuf_slices_update(dev_priv, required_slices); | ||
12274 | |||
12269 | /* | 12275 | /* |
12270 | * Whenever the number of active pipes changes, we need to make sure we | 12276 | * Whenever the number of active pipes changes, we need to make sure we |
12271 | * update the pipes in the right order so that their ddb allocations | 12277 | * update the pipes in the right order so that their ddb allocations |
@@ -12316,6 +12322,10 @@ static void skl_update_crtcs(struct drm_atomic_state *state) | |||
12316 | progress = true; | 12322 | progress = true; |
12317 | } | 12323 | } |
12318 | } while (progress); | 12324 | } while (progress); |
12325 | |||
12326 | /* If 2nd DBuf slice is no more required disable it */ | ||
12327 | if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices) | ||
12328 | icl_dbuf_slices_update(dev_priv, required_slices); | ||
12319 | } | 12329 | } |
12320 | 12330 | ||
12321 | static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) | 12331 | static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv) |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 9bba0354ccd3..11a1932cde6e 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -144,6 +144,10 @@ | |||
144 | #define KHz(x) (1000 * (x)) | 144 | #define KHz(x) (1000 * (x)) |
145 | #define MHz(x) KHz(1000 * (x)) | 145 | #define MHz(x) KHz(1000 * (x)) |
146 | 146 | ||
147 | #define KBps(x) (1000 * (x)) | ||
148 | #define MBps(x) KBps(1000 * (x)) | ||
149 | #define GBps(x) ((u64)1000 * MBps((x))) | ||
150 | |||
147 | /* | 151 | /* |
148 | * Display related stuff | 152 | * Display related stuff |
149 | */ | 153 | */ |
@@ -1931,6 +1935,8 @@ bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv, | |||
1931 | enum intel_display_power_domain domain); | 1935 | enum intel_display_power_domain domain); |
1932 | void intel_display_power_put(struct drm_i915_private *dev_priv, | 1936 | void intel_display_power_put(struct drm_i915_private *dev_priv, |
1933 | enum intel_display_power_domain domain); | 1937 | enum intel_display_power_domain domain); |
1938 | void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, | ||
1939 | u8 req_slices); | ||
1934 | 1940 | ||
1935 | static inline void | 1941 | static inline void |
1936 | assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv) | 1942 | assert_rpm_device_not_suspended(struct drm_i915_private *dev_priv) |
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a29e6d512771..3e72e9eb736e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c | |||
@@ -3771,9 +3771,42 @@ bool intel_can_enable_sagv(struct drm_atomic_state *state) | |||
3771 | return true; | 3771 | return true; |
3772 | } | 3772 | } |
3773 | 3773 | ||
3774 | static unsigned int intel_get_ddb_size(struct drm_i915_private *dev_priv, | ||
3775 | const struct intel_crtc_state *cstate, | ||
3776 | const unsigned int total_data_rate, | ||
3777 | const int num_active, | ||
3778 | struct skl_ddb_allocation *ddb) | ||
3779 | { | ||
3780 | const struct drm_display_mode *adjusted_mode; | ||
3781 | u64 total_data_bw; | ||
3782 | u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size; | ||
3783 | |||
3784 | WARN_ON(ddb_size == 0); | ||
3785 | |||
3786 | if (INTEL_GEN(dev_priv) < 11) | ||
3787 | return ddb_size - 4; /* 4 blocks for bypass path allocation */ | ||
3788 | |||
3789 | adjusted_mode = &cstate->base.adjusted_mode; | ||
3790 | total_data_bw = (u64)total_data_rate * drm_mode_vrefresh(adjusted_mode); | ||
3791 | |||
3792 | /* | ||
3793 | * 12GB/s is maximum BW supported by single DBuf slice. | ||
3794 | */ | ||
3795 | if (total_data_bw >= GBps(12) || num_active > 1) { | ||
3796 | ddb->enabled_slices = 2; | ||
3797 | } else { | ||
3798 | ddb->enabled_slices = 1; | ||
3799 | ddb_size /= 2; | ||
3800 | } | ||
3801 | |||
3802 | return ddb_size; | ||
3803 | } | ||
3804 | |||
3774 | static void | 3805 | static void |
3775 | skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, | 3806 | skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, |
3776 | const struct intel_crtc_state *cstate, | 3807 | const struct intel_crtc_state *cstate, |
3808 | const unsigned int total_data_rate, | ||
3809 | struct skl_ddb_allocation *ddb, | ||
3777 | struct skl_ddb_entry *alloc, /* out */ | 3810 | struct skl_ddb_entry *alloc, /* out */ |
3778 | int *num_active /* out */) | 3811 | int *num_active /* out */) |
3779 | { | 3812 | { |
@@ -3796,11 +3829,8 @@ skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, | |||
3796 | else | 3829 | else |
3797 | *num_active = hweight32(dev_priv->active_crtcs); | 3830 | *num_active = hweight32(dev_priv->active_crtcs); |
3798 | 3831 | ||
3799 | ddb_size = INTEL_INFO(dev_priv)->ddb_size; | 3832 | ddb_size = intel_get_ddb_size(dev_priv, cstate, total_data_rate, |
3800 | WARN_ON(ddb_size == 0); | 3833 | *num_active, ddb); |
3801 | |||
3802 | if (INTEL_GEN(dev_priv) < 11) | ||
3803 | ddb_size -= 4; /* 4 blocks for bypass path allocation */ | ||
3804 | 3834 | ||
3805 | /* | 3835 | /* |
3806 | * If the state doesn't change the active CRTC's, then there's | 3836 | * If the state doesn't change the active CRTC's, then there's |
@@ -4261,7 +4291,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4261 | return 0; | 4291 | return 0; |
4262 | } | 4292 | } |
4263 | 4293 | ||
4264 | skl_ddb_get_pipe_allocation_limits(dev, cstate, alloc, &num_active); | 4294 | total_data_rate = skl_get_total_relative_data_rate(cstate, |
4295 | plane_data_rate, | ||
4296 | uv_plane_data_rate); | ||
4297 | skl_ddb_get_pipe_allocation_limits(dev, cstate, total_data_rate, ddb, | ||
4298 | alloc, &num_active); | ||
4265 | alloc_size = skl_ddb_entry_size(alloc); | 4299 | alloc_size = skl_ddb_entry_size(alloc); |
4266 | if (alloc_size == 0) | 4300 | if (alloc_size == 0) |
4267 | return 0; | 4301 | return 0; |
@@ -4296,9 +4330,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *cstate, | |||
4296 | * | 4330 | * |
4297 | * FIXME: we may not allocate every single block here. | 4331 | * FIXME: we may not allocate every single block here. |
4298 | */ | 4332 | */ |
4299 | total_data_rate = skl_get_total_relative_data_rate(cstate, | ||
4300 | plane_data_rate, | ||
4301 | uv_plane_data_rate); | ||
4302 | if (total_data_rate == 0) | 4333 | if (total_data_rate == 0) |
4303 | return 0; | 4334 | return 0; |
4304 | 4335 | ||
@@ -5492,8 +5523,12 @@ void skl_wm_get_hw_state(struct drm_device *dev) | |||
5492 | /* Fully recompute DDB on first atomic commit */ | 5523 | /* Fully recompute DDB on first atomic commit */ |
5493 | dev_priv->wm.distrust_bios_wm = true; | 5524 | dev_priv->wm.distrust_bios_wm = true; |
5494 | } else { | 5525 | } else { |
5495 | /* Easy/common case; just sanitize DDB now if everything off */ | 5526 | /* |
5496 | memset(ddb, 0, sizeof(*ddb)); | 5527 | * Easy/common case; just sanitize DDB now if everything off |
5528 | * Keep dbuf slice info intact | ||
5529 | */ | ||
5530 | memset(ddb->plane, 0, sizeof(ddb->plane)); | ||
5531 | memset(ddb->uv_plane, 0, sizeof(ddb->uv_plane)); | ||
5497 | } | 5532 | } |
5498 | } | 5533 | } |
5499 | 5534 | ||
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index afc6ef81ca0c..3fffbfe4521d 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
@@ -2619,32 +2619,69 @@ static void intel_power_domains_sync_hw(struct drm_i915_private *dev_priv) | |||
2619 | mutex_unlock(&power_domains->lock); | 2619 | mutex_unlock(&power_domains->lock); |
2620 | } | 2620 | } |
2621 | 2621 | ||
2622 | static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) | 2622 | static inline |
2623 | bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv, | ||
2624 | i915_reg_t reg, bool enable) | ||
2623 | { | 2625 | { |
2624 | I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) | DBUF_POWER_REQUEST); | 2626 | u32 val, status; |
2625 | POSTING_READ(DBUF_CTL); | ||
2626 | 2627 | ||
2628 | val = I915_READ(reg); | ||
2629 | val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST); | ||
2630 | I915_WRITE(reg, val); | ||
2631 | POSTING_READ(reg); | ||
2627 | udelay(10); | 2632 | udelay(10); |
2628 | 2633 | ||
2629 | if (!(I915_READ(DBUF_CTL) & DBUF_POWER_STATE)) | 2634 | status = I915_READ(reg) & DBUF_POWER_STATE; |
2630 | DRM_ERROR("DBuf power enable timeout\n"); | 2635 | if ((enable && !status) || (!enable && status)) { |
2636 | DRM_ERROR("DBus power %s timeout!\n", | ||
2637 | enable ? "enable" : "disable"); | ||
2638 | return false; | ||
2639 | } | ||
2640 | return true; | ||
2641 | } | ||
2642 | |||
2643 | static void gen9_dbuf_enable(struct drm_i915_private *dev_priv) | ||
2644 | { | ||
2645 | intel_dbuf_slice_set(dev_priv, DBUF_CTL, true); | ||
2631 | } | 2646 | } |
2632 | 2647 | ||
2633 | static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) | 2648 | static void gen9_dbuf_disable(struct drm_i915_private *dev_priv) |
2634 | { | 2649 | { |
2635 | I915_WRITE(DBUF_CTL, I915_READ(DBUF_CTL) & ~DBUF_POWER_REQUEST); | 2650 | intel_dbuf_slice_set(dev_priv, DBUF_CTL, false); |
2636 | POSTING_READ(DBUF_CTL); | 2651 | } |
2637 | 2652 | ||
2638 | udelay(10); | 2653 | static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv) |
2654 | { | ||
2655 | if (INTEL_GEN(dev_priv) < 11) | ||
2656 | return 1; | ||
2657 | return 2; | ||
2658 | } | ||
2639 | 2659 | ||
2640 | if (I915_READ(DBUF_CTL) & DBUF_POWER_STATE) | 2660 | void icl_dbuf_slices_update(struct drm_i915_private *dev_priv, |
2641 | DRM_ERROR("DBuf power disable timeout!\n"); | 2661 | u8 req_slices) |
2662 | { | ||
2663 | u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices; | ||
2664 | u32 val; | ||
2665 | bool ret; | ||
2666 | |||
2667 | if (req_slices > intel_dbuf_max_slices(dev_priv)) { | ||
2668 | DRM_ERROR("Invalid number of dbuf slices requested\n"); | ||
2669 | return; | ||
2670 | } | ||
2671 | |||
2672 | if (req_slices == hw_enabled_slices || req_slices == 0) | ||
2673 | return; | ||
2674 | |||
2675 | val = I915_READ(DBUF_CTL_S2); | ||
2676 | if (req_slices > hw_enabled_slices) | ||
2677 | ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true); | ||
2678 | else | ||
2679 | ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false); | ||
2680 | |||
2681 | if (ret) | ||
2682 | dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices; | ||
2642 | } | 2683 | } |
2643 | 2684 | ||
2644 | /* | ||
2645 | * TODO: we shouldn't always enable DBUF_CTL_S2, we should only enable it when | ||
2646 | * needed and keep it disabled as much as possible. | ||
2647 | */ | ||
2648 | static void icl_dbuf_enable(struct drm_i915_private *dev_priv) | 2685 | static void icl_dbuf_enable(struct drm_i915_private *dev_priv) |
2649 | { | 2686 | { |
2650 | I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST); | 2687 | I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST); |