aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/gpu/drm/i915/intel_pm.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c270
1 files changed, 264 insertions, 6 deletions
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d5deb58a2128..53e13c10e4ea 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2852,6 +2852,7 @@ bool ilk_disable_lp_wm(struct drm_device *dev)
2852 2852
2853#define SKL_DDB_SIZE 896 /* in blocks */ 2853#define SKL_DDB_SIZE 896 /* in blocks */
2854#define BXT_DDB_SIZE 512 2854#define BXT_DDB_SIZE 512
2855#define SKL_SAGV_BLOCK_TIME 30 /* µs */
2855 2856
2856/* 2857/*
2857 * Return the index of a plane in the SKL DDB and wm result arrays. Primary 2858 * Return the index of a plane in the SKL DDB and wm result arrays. Primary
@@ -2875,6 +2876,153 @@ skl_wm_plane_id(const struct intel_plane *plane)
2875 } 2876 }
2876} 2877}
2877 2878
2879/*
2880 * SAGV dynamically adjusts the system agent voltage and clock frequencies
2881 * depending on power and performance requirements. The display engine access
2882 * to system memory is blocked during the adjustment time. Because of the
2883 * blocking time, having this enabled can cause full system hangs and/or pipe
2884 * underruns if we don't meet all of the following requirements:
2885 *
2886 * - <= 1 pipe enabled
2887 * - All planes can enable watermarks for latencies >= SAGV engine block time
2888 * - We're not using an interlaced display configuration
2889 */
2890int
2891skl_enable_sagv(struct drm_i915_private *dev_priv)
2892{
2893 int ret;
2894
2895 if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
2896 dev_priv->skl_sagv_status == I915_SKL_SAGV_ENABLED)
2897 return 0;
2898
2899 DRM_DEBUG_KMS("Enabling the SAGV\n");
2900 mutex_lock(&dev_priv->rps.hw_lock);
2901
2902 ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL,
2903 GEN9_SAGV_ENABLE);
2904
2905 /* We don't need to wait for the SAGV when enabling */
2906 mutex_unlock(&dev_priv->rps.hw_lock);
2907
2908 /*
2909 * Some skl systems, pre-release machines in particular,
2910 * don't actually have an SAGV.
2911 */
2912 if (ret == -ENXIO) {
2913 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
2914 dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
2915 return 0;
2916 } else if (ret < 0) {
2917 DRM_ERROR("Failed to enable the SAGV\n");
2918 return ret;
2919 }
2920
2921 dev_priv->skl_sagv_status = I915_SKL_SAGV_ENABLED;
2922 return 0;
2923}
2924
2925static int
2926skl_do_sagv_disable(struct drm_i915_private *dev_priv)
2927{
2928 int ret;
2929 uint32_t temp = GEN9_SAGV_DISABLE;
2930
2931 ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
2932 &temp);
2933 if (ret)
2934 return ret;
2935 else
2936 return temp & GEN9_SAGV_IS_DISABLED;
2937}
2938
2939int
2940skl_disable_sagv(struct drm_i915_private *dev_priv)
2941{
2942 int ret, result;
2943
2944 if (dev_priv->skl_sagv_status == I915_SKL_SAGV_NOT_CONTROLLED ||
2945 dev_priv->skl_sagv_status == I915_SKL_SAGV_DISABLED)
2946 return 0;
2947
2948 DRM_DEBUG_KMS("Disabling the SAGV\n");
2949 mutex_lock(&dev_priv->rps.hw_lock);
2950
2951 /* bspec says to keep retrying for at least 1 ms */
2952 ret = wait_for(result = skl_do_sagv_disable(dev_priv), 1);
2953 mutex_unlock(&dev_priv->rps.hw_lock);
2954
2955 if (ret == -ETIMEDOUT) {
2956 DRM_ERROR("Request to disable SAGV timed out\n");
2957 return -ETIMEDOUT;
2958 }
2959
2960 /*
2961 * Some skl systems, pre-release machines in particular,
2962 * don't actually have an SAGV.
2963 */
2964 if (result == -ENXIO) {
2965 DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
2966 dev_priv->skl_sagv_status = I915_SKL_SAGV_NOT_CONTROLLED;
2967 return 0;
2968 } else if (result < 0) {
2969 DRM_ERROR("Failed to disable the SAGV\n");
2970 return result;
2971 }
2972
2973 dev_priv->skl_sagv_status = I915_SKL_SAGV_DISABLED;
2974 return 0;
2975}
2976
2977bool skl_can_enable_sagv(struct drm_atomic_state *state)
2978{
2979 struct drm_device *dev = state->dev;
2980 struct drm_i915_private *dev_priv = to_i915(dev);
2981 struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
2982 struct drm_crtc *crtc;
2983 enum pipe pipe;
2984 int level, plane;
2985
2986 /*
2987 * SKL workaround: bspec recommends we disable the SAGV when we have
2988 * more then one pipe enabled
2989 *
2990 * If there are no active CRTCs, no additional checks need be performed
2991 */
2992 if (hweight32(intel_state->active_crtcs) == 0)
2993 return true;
2994 else if (hweight32(intel_state->active_crtcs) > 1)
2995 return false;
2996
2997 /* Since we're now guaranteed to only have one active CRTC... */
2998 pipe = ffs(intel_state->active_crtcs) - 1;
2999 crtc = dev_priv->pipe_to_crtc_mapping[pipe];
3000
3001 if (crtc->state->mode.flags & DRM_MODE_FLAG_INTERLACE)
3002 return false;
3003
3004 for_each_plane(dev_priv, pipe, plane) {
3005 /* Skip this plane if it's not enabled */
3006 if (intel_state->wm_results.plane[pipe][plane][0] == 0)
3007 continue;
3008
3009 /* Find the highest enabled wm level for this plane */
3010 for (level = ilk_wm_max_level(dev);
3011 intel_state->wm_results.plane[pipe][plane][level] == 0; --level)
3012 { }
3013
3014 /*
3015 * If any of the planes on this pipe don't enable wm levels
3016 * that incur memory latencies higher then 30µs we can't enable
3017 * the SAGV
3018 */
3019 if (dev_priv->wm.skl_latency[level] < SKL_SAGV_BLOCK_TIME)
3020 return false;
3021 }
3022
3023 return true;
3024}
3025
2878static void 3026static void
2879skl_ddb_get_pipe_allocation_limits(struct drm_device *dev, 3027skl_ddb_get_pipe_allocation_limits(struct drm_device *dev,
2880 const struct intel_crtc_state *cstate, 3028 const struct intel_crtc_state *cstate,
@@ -3107,8 +3255,6 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *intel_cstate)
3107 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id]; 3255 total_data_rate += intel_cstate->wm.skl.plane_y_data_rate[id];
3108 } 3256 }
3109 3257
3110 WARN_ON(cstate->plane_mask && total_data_rate == 0);
3111
3112 return total_data_rate; 3258 return total_data_rate;
3113} 3259}
3114 3260
@@ -3912,9 +4058,24 @@ skl_compute_ddb(struct drm_atomic_state *state)
3912 * pretend that all pipes switched active status so that we'll 4058 * pretend that all pipes switched active status so that we'll
3913 * ensure a full DDB recompute. 4059 * ensure a full DDB recompute.
3914 */ 4060 */
3915 if (dev_priv->wm.distrust_bios_wm) 4061 if (dev_priv->wm.distrust_bios_wm) {
4062 ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
4063 state->acquire_ctx);
4064 if (ret)
4065 return ret;
4066
3916 intel_state->active_pipe_changes = ~0; 4067 intel_state->active_pipe_changes = ~0;
3917 4068
4069 /*
4070 * We usually only initialize intel_state->active_crtcs if we
4071 * we're doing a modeset; make sure this field is always
4072 * initialized during the sanitization process that happens
4073 * on the first commit too.
4074 */
4075 if (!intel_state->modeset)
4076 intel_state->active_crtcs = dev_priv->active_crtcs;
4077 }
4078
3918 /* 4079 /*
3919 * If the modeset changes which CRTC's are active, we need to 4080 * If the modeset changes which CRTC's are active, we need to
3920 * recompute the DDB allocation for *all* active pipes, even 4081 * recompute the DDB allocation for *all* active pipes, even
@@ -3943,11 +4104,33 @@ skl_compute_ddb(struct drm_atomic_state *state)
3943 ret = skl_allocate_pipe_ddb(cstate, ddb); 4104 ret = skl_allocate_pipe_ddb(cstate, ddb);
3944 if (ret) 4105 if (ret)
3945 return ret; 4106 return ret;
4107
4108 ret = drm_atomic_add_affected_planes(state, &intel_crtc->base);
4109 if (ret)
4110 return ret;
3946 } 4111 }
3947 4112
3948 return 0; 4113 return 0;
3949} 4114}
3950 4115
/*
 * Copy one pipe's worth of watermark and DDB state from @src to @dst,
 * leaving every other pipe's values in @dst untouched.  Used to merge
 * freshly computed results into the cached hardware state pipe-by-pipe.
 */
static void
skl_copy_wm_for_pipe(struct skl_wm_values *dst,
		     struct skl_wm_values *src,
		     enum pipe pipe)
{
	/* Per-pipe watermark values */
	dst->wm_linetime[pipe] = src->wm_linetime[pipe];
	memcpy(dst->plane[pipe], src->plane[pipe],
	       sizeof(dst->plane[pipe]));
	memcpy(dst->plane_trans[pipe], src->plane_trans[pipe],
	       sizeof(dst->plane_trans[pipe]));

	/* Per-pipe DDB allocations */
	dst->ddb.pipe[pipe] = src->ddb.pipe[pipe];
	memcpy(dst->ddb.y_plane[pipe], src->ddb.y_plane[pipe],
	       sizeof(dst->ddb.y_plane[pipe]));
	memcpy(dst->ddb.plane[pipe], src->ddb.plane[pipe],
	       sizeof(dst->ddb.plane[pipe]));
}
4133
3951static int 4134static int
3952skl_compute_wm(struct drm_atomic_state *state) 4135skl_compute_wm(struct drm_atomic_state *state)
3953{ 4136{
@@ -4020,8 +4203,10 @@ static void skl_update_wm(struct drm_crtc *crtc)
4020 struct drm_device *dev = crtc->dev; 4203 struct drm_device *dev = crtc->dev;
4021 struct drm_i915_private *dev_priv = to_i915(dev); 4204 struct drm_i915_private *dev_priv = to_i915(dev);
4022 struct skl_wm_values *results = &dev_priv->wm.skl_results; 4205 struct skl_wm_values *results = &dev_priv->wm.skl_results;
4206 struct skl_wm_values *hw_vals = &dev_priv->wm.skl_hw;
4023 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state); 4207 struct intel_crtc_state *cstate = to_intel_crtc_state(crtc->state);
4024 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal; 4208 struct skl_pipe_wm *pipe_wm = &cstate->wm.skl.optimal;
4209 int pipe;
4025 4210
4026 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0) 4211 if ((results->dirty_pipes & drm_crtc_mask(crtc)) == 0)
4027 return; 4212 return;
@@ -4033,8 +4218,12 @@ static void skl_update_wm(struct drm_crtc *crtc)
4033 skl_write_wm_values(dev_priv, results); 4218 skl_write_wm_values(dev_priv, results);
4034 skl_flush_wm_values(dev_priv, results); 4219 skl_flush_wm_values(dev_priv, results);
4035 4220
4036 /* store the new configuration */ 4221 /*
4037 dev_priv->wm.skl_hw = *results; 4222 * Store the new configuration (but only for the pipes that have
4223 * changed; the other values weren't recomputed).
4224 */
4225 for_each_pipe_masked(dev_priv, pipe, results->dirty_pipes)
4226 skl_copy_wm_for_pipe(hw_vals, results, pipe);
4038 4227
4039 mutex_unlock(&dev_priv->wm.wm_mutex); 4228 mutex_unlock(&dev_priv->wm.wm_mutex);
4040} 4229}
@@ -7658,8 +7847,53 @@ void intel_init_pm(struct drm_device *dev)
7658 } 7847 }
7659} 7848}
7660 7849
7850static inline int gen6_check_mailbox_status(struct drm_i915_private *dev_priv)
7851{
7852 uint32_t flags =
7853 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
7854
7855 switch (flags) {
7856 case GEN6_PCODE_SUCCESS:
7857 return 0;
7858 case GEN6_PCODE_UNIMPLEMENTED_CMD:
7859 case GEN6_PCODE_ILLEGAL_CMD:
7860 return -ENXIO;
7861 case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7862 return -EOVERFLOW;
7863 case GEN6_PCODE_TIMEOUT:
7864 return -ETIMEDOUT;
7865 default:
7866 MISSING_CASE(flags)
7867 return 0;
7868 }
7869}
7870
7871static inline int gen7_check_mailbox_status(struct drm_i915_private *dev_priv)
7872{
7873 uint32_t flags =
7874 I915_READ_FW(GEN6_PCODE_MAILBOX) & GEN6_PCODE_ERROR_MASK;
7875
7876 switch (flags) {
7877 case GEN6_PCODE_SUCCESS:
7878 return 0;
7879 case GEN6_PCODE_ILLEGAL_CMD:
7880 return -ENXIO;
7881 case GEN7_PCODE_TIMEOUT:
7882 return -ETIMEDOUT;
7883 case GEN7_PCODE_ILLEGAL_DATA:
7884 return -EINVAL;
7885 case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
7886 return -EOVERFLOW;
7887 default:
7888 MISSING_CASE(flags);
7889 return 0;
7890 }
7891}
7892
7661int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val) 7893int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val)
7662{ 7894{
7895 int status;
7896
7663 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7897 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7664 7898
7665 /* GEN6_PCODE_* are outside of the forcewake domain, we can 7899 /* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7686,12 +7920,25 @@ int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val
7686 *val = I915_READ_FW(GEN6_PCODE_DATA); 7920 *val = I915_READ_FW(GEN6_PCODE_DATA);
7687 I915_WRITE_FW(GEN6_PCODE_DATA, 0); 7921 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7688 7922
7923 if (INTEL_GEN(dev_priv) > 6)
7924 status = gen7_check_mailbox_status(dev_priv);
7925 else
7926 status = gen6_check_mailbox_status(dev_priv);
7927
7928 if (status) {
7929 DRM_DEBUG_DRIVER("warning: pcode (read) mailbox access failed: %d\n",
7930 status);
7931 return status;
7932 }
7933
7689 return 0; 7934 return 0;
7690} 7935}
7691 7936
7692int sandybridge_pcode_write(struct drm_i915_private *dev_priv, 7937int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
7693 u32 mbox, u32 val) 7938 u32 mbox, u32 val)
7694{ 7939{
7940 int status;
7941
7695 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock)); 7942 WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
7696 7943
7697 /* GEN6_PCODE_* are outside of the forcewake domain, we can 7944 /* GEN6_PCODE_* are outside of the forcewake domain, we can
@@ -7716,6 +7963,17 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
7716 7963
7717 I915_WRITE_FW(GEN6_PCODE_DATA, 0); 7964 I915_WRITE_FW(GEN6_PCODE_DATA, 0);
7718 7965
7966 if (INTEL_GEN(dev_priv) > 6)
7967 status = gen7_check_mailbox_status(dev_priv);
7968 else
7969 status = gen6_check_mailbox_status(dev_priv);
7970
7971 if (status) {
7972 DRM_DEBUG_DRIVER("warning: pcode (write) mailbox access failed: %d\n",
7973 status);
7974 return status;
7975 }
7976
7719 return 0; 7977 return 0;
7720} 7978}
7721 7979