Diffstat (limited to 'drivers/gpu/drm/i915/intel_pm.c')
-rw-r--r--  drivers/gpu/drm/i915/intel_pm.c | 109
1 file changed, 84 insertions(+), 25 deletions(-)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d67974eb127a..ae2c0bb4b2e8 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -2964,24 +2964,10 @@ intel_enable_sagv(struct drm_i915_private *dev_priv)
 	return 0;
 }
 
-static int
-intel_do_sagv_disable(struct drm_i915_private *dev_priv)
-{
-	int ret;
-	uint32_t temp = GEN9_SAGV_DISABLE;
-
-	ret = sandybridge_pcode_read(dev_priv, GEN9_PCODE_SAGV_CONTROL,
-				     &temp);
-	if (ret)
-		return ret;
-	else
-		return temp & GEN9_SAGV_IS_DISABLED;
-}
-
 int
 intel_disable_sagv(struct drm_i915_private *dev_priv)
 {
-	int ret, result;
+	int ret;
 
 	if (!intel_has_sagv(dev_priv))
 		return 0;
@@ -2993,25 +2979,23 @@ intel_disable_sagv(struct drm_i915_private *dev_priv)
 	mutex_lock(&dev_priv->rps.hw_lock);
 
 	/* bspec says to keep retrying for at least 1 ms */
-	ret = wait_for(result = intel_do_sagv_disable(dev_priv), 1);
+	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
+				GEN9_SAGV_DISABLE,
+				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
+				1);
 	mutex_unlock(&dev_priv->rps.hw_lock);
 
-	if (ret == -ETIMEDOUT) {
-		DRM_ERROR("Request to disable SAGV timed out\n");
-		return -ETIMEDOUT;
-	}
-
 	/*
 	 * Some skl systems, pre-release machines in particular,
 	 * don't actually have an SAGV.
 	 */
-	if (IS_SKYLAKE(dev_priv) && result == -ENXIO) {
+	if (IS_SKYLAKE(dev_priv) && ret == -ENXIO) {
 		DRM_DEBUG_DRIVER("No SAGV found on system, ignoring\n");
 		dev_priv->sagv_status = I915_SAGV_NOT_CONTROLLED;
 		return 0;
-	} else if (result < 0) {
-		DRM_ERROR("Failed to disable the SAGV\n");
-		return result;
+	} else if (ret < 0) {
+		DRM_ERROR("Failed to disable the SAGV (%d)\n", ret);
+		return ret;
 	}
 
 	dev_priv->sagv_status = I915_SAGV_DISABLED;
@@ -7890,6 +7874,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+				  u32 request, u32 reply_mask, u32 reply,
+				  u32 *status)
+{
+	u32 val = request;
+
+	*status = sandybridge_pcode_read(dev_priv, mbox, &val);
+
+	return *status || ((val & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @dev_priv: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and, if this times out, for another 10 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+		      u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+	u32 status;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
+				   &status)
+
+	/*
+	 * Prime the PCODE by doing a request first. Normally it guarantees
+	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
+	 * _wait_for() doesn't guarantee when its passed condition is evaluated
+	 * first, so send the first request explicitly.
+	 */
+	if (COND) {
+		ret = 0;
+		goto out;
+	}
+	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+	if (!ret)
+		goto out;
+
+	/*
+	 * The above can time out if the number of requests was low (2 in the
+	 * worst case) _and_ PCODE was busy for some reason even after a
+	 * (queued) request and @timeout_base_ms delay. As a workaround retry
+	 * the poll with preemption disabled to maximize the number of
+	 * requests. Increase the timeout from @timeout_base_ms to 10ms to
+	 * account for interrupts that could reduce the number of these
+	 * requests.
+	 */
+	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+	WARN_ON_ONCE(timeout_base_ms > 3);
+	preempt_disable();
+	ret = wait_for_atomic(COND, 10);
+	preempt_enable();
+
+out:
+	return ret ? ret : status;
+#undef COND
+}
+
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
 	/*
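
For reference, below is a minimal usage sketch (not part of the patch) showing how a caller is expected to drive the new skl_pcode_request() helper; it mirrors the intel_disable_sagv() hunk above. The wrapper name example_sagv_disable() is hypothetical, while the mailbox and reply constants are the existing i915 definitions used by the patch.

/* Hypothetical caller, modelled on the intel_disable_sagv() hunk above. */
static int example_sagv_disable(struct drm_i915_private *dev_priv)
{
	int ret;

	/* skl_pcode_request() asserts that rps.hw_lock is held. */
	mutex_lock(&dev_priv->rps.hw_lock);

	/*
	 * Resend GEN9_SAGV_DISABLE to the SAGV control mailbox until the
	 * reply, masked with GEN9_SAGV_IS_DISABLED, reads back as
	 * GEN9_SAGV_IS_DISABLED; poll preemptibly for 1 ms, after which the
	 * helper retries for another 10 ms with preemption disabled.
	 */
	ret = skl_pcode_request(dev_priv, GEN9_PCODE_SAGV_CONTROL,
				GEN9_SAGV_DISABLE,
				GEN9_SAGV_IS_DISABLED, GEN9_SAGV_IS_DISABLED,
				1);

	mutex_unlock(&dev_priv->rps.hw_lock);

	/* 0 on success, -ETIMEDOUT on timeout, or a PCODE-reported error. */
	return ret;
}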