 drivers/gpu/drm/i915/i915_drv.h      |  2 ++
 drivers/gpu/drm/i915/intel_display.c | 31 ++++++++++---------------------
 drivers/gpu/drm/i915/intel_pm.c      | 75 +++++++++++++++++++++++++++++++++++
 3 files changed, 87 insertions(+), 21 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 56002a52936d..243224aeabf8 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3509,6 +3509,8 @@ extern void intel_display_print_error_state(struct drm_i915_error_state_buf *e,
 
 int sandybridge_pcode_read(struct drm_i915_private *dev_priv, u32 mbox, u32 *val);
 int sandybridge_pcode_write(struct drm_i915_private *dev_priv, u32 mbox, u32 val);
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+		      u32 reply_mask, u32 reply, int timeout_base_ms);
 
 /* intel_sideband.c */
 u32 vlv_punit_read(struct drm_i915_private *dev_priv, u32 addr);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 4ef5a39acc69..7d234043822d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6244,35 +6244,24 @@ skl_dpll0_disable(struct drm_i915_private *dev_priv)
 	dev_priv->cdclk_pll.vco = 0;
 }
 
-static bool skl_cdclk_pcu_ready(struct drm_i915_private *dev_priv)
-{
-	int ret;
-	u32 val;
-
-	/* inform PCU we want to change CDCLK */
-	val = SKL_CDCLK_PREPARE_FOR_CHANGE;
-	mutex_lock(&dev_priv->rps.hw_lock);
-	ret = sandybridge_pcode_read(dev_priv, SKL_PCODE_CDCLK_CONTROL, &val);
-	mutex_unlock(&dev_priv->rps.hw_lock);
-
-	return ret == 0 && (val & SKL_CDCLK_READY_FOR_CHANGE);
-}
-
-static bool skl_cdclk_wait_for_pcu_ready(struct drm_i915_private *dev_priv)
-{
-	return _wait_for(skl_cdclk_pcu_ready(dev_priv), 3000, 10) == 0;
-}
-
 static void skl_set_cdclk(struct drm_i915_private *dev_priv, int cdclk, int vco)
 {
 	u32 freq_select, pcu_ack;
+	int ret;
 
 	WARN_ON((cdclk == 24000) != (vco == 0));
 
 	DRM_DEBUG_DRIVER("Changing CDCLK to %d kHz (VCO %d kHz)\n", cdclk, vco);
 
-	if (!skl_cdclk_wait_for_pcu_ready(dev_priv)) {
-		DRM_ERROR("failed to inform PCU about cdclk change\n");
+	mutex_lock(&dev_priv->rps.hw_lock);
+	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
+				SKL_CDCLK_PREPARE_FOR_CHANGE,
+				SKL_CDCLK_READY_FOR_CHANGE,
+				SKL_CDCLK_READY_FOR_CHANGE, 3);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	if (ret) {
+		DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
+			  ret);
 		return;
 	}
 
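With the helper in place, the hunk above is the only caller change: the open-coded skl_cdclk_pcu_ready() / skl_cdclk_wait_for_pcu_ready() pair collapses into a single skl_pcode_request() call made under rps.hw_lock, which the helper itself asserts. A minimal sketch of the resulting calling convention follows; the wrapper function name is hypothetical, while the mailbox and flag constants are the driver's real ones.

/* Hypothetical wrapper, for illustration only: any "resend the request
 * until the hardware reports ready" handshake can reuse the helper. */
static int example_prepare_cdclk_change(struct drm_i915_private *dev_priv)
{
	int ret;

	mutex_lock(&dev_priv->rps.hw_lock);	/* helper WARNs if not held */
	ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
				SKL_CDCLK_PREPARE_FOR_CHANGE,	/* request */
				SKL_CDCLK_READY_FOR_CHANGE,	/* reply_mask */
				SKL_CDCLK_READY_FOR_CHANGE,	/* reply */
				3);				/* timeout_base_ms */
	mutex_unlock(&dev_priv->rps.hw_lock);

	return ret;	/* 0, -ETIMEDOUT, or an error reported by PCODE */
}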
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index cbd0f3269b2d..90e42e094b13 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -7890,6 +7890,81 @@ int sandybridge_pcode_write(struct drm_i915_private *dev_priv,
 	return 0;
 }
 
+static bool skl_pcode_try_request(struct drm_i915_private *dev_priv, u32 mbox,
+				  u32 request, u32 reply_mask, u32 reply,
+				  u32 *status)
+{
+	u32 val = request;
+
+	*status = sandybridge_pcode_read(dev_priv, mbox, &val);
+
+	return *status || ((val & reply_mask) == reply);
+}
+
+/**
+ * skl_pcode_request - send PCODE request until acknowledgment
+ * @dev_priv: device private
+ * @mbox: PCODE mailbox ID the request is targeted for
+ * @request: request ID
+ * @reply_mask: mask used to check for request acknowledgment
+ * @reply: value used to check for request acknowledgment
+ * @timeout_base_ms: timeout for polling with preemption enabled
+ *
+ * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
+ * reports an error or an overall timeout of @timeout_base_ms+10 ms expires.
+ * The request is acknowledged once the PCODE reply dword equals @reply after
+ * applying @reply_mask. Polling is first attempted with preemption enabled
+ * for @timeout_base_ms and, if this times out, for another 10 ms with
+ * preemption disabled.
+ *
+ * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
+ * other error as reported by PCODE.
+ */
+int skl_pcode_request(struct drm_i915_private *dev_priv, u32 mbox, u32 request,
+		      u32 reply_mask, u32 reply, int timeout_base_ms)
+{
+	u32 status;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->rps.hw_lock));
+
+#define COND skl_pcode_try_request(dev_priv, mbox, request, reply_mask, reply, \
+				   &status)
+
+	/*
+	 * Prime the PCODE by doing a request first. Normally it guarantees
+	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
+	 * _wait_for() doesn't guarantee when its passed condition is evaluated
+	 * first, so send the first request explicitly.
+	 */
+	if (COND) {
+		ret = 0;
+		goto out;
+	}
+	ret = _wait_for(COND, timeout_base_ms * 1000, 10);
+	if (!ret)
+		goto out;
+
+	/*
+	 * The above can time out if the number of requests was low (2 in the
+	 * worst case) _and_ PCODE was busy for some reason even after a
+	 * (queued) request and @timeout_base_ms delay. As a workaround retry
+	 * the poll with preemption disabled to maximize the number of
+	 * requests. Increase the timeout from @timeout_base_ms to 10ms to
+	 * account for interrupts that could reduce the number of these
+	 * requests.
+	 */
+	DRM_DEBUG_KMS("PCODE timeout, retrying with preemption disabled\n");
+	WARN_ON_ONCE(timeout_base_ms > 3);
+	preempt_disable();
+	ret = wait_for_atomic(COND, 10);
+	preempt_enable();
+
+out:
+	return ret ? ret : status;
+#undef COND
+}
+
 static int byt_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
 	/*
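The two-stage strategy itself — prime with one eager request, poll with short sleeps for @timeout_base_ms, then busy-poll for a further 10 ms at the maximum request rate — is not i915-specific. Below is a minimal, self-contained userspace sketch of the same pattern, assuming a caller-supplied try_request() callback with the same contract as skl_pcode_try_request() (returns true once polling can stop, leaving the error code in *status); all names here are illustrative, not kernel API.

#include <errno.h>
#include <stdbool.h>
#include <time.h>

static long long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000LL + ts.tv_nsec / 1000000;
}

/*
 * try_request() resends the request and returns true once polling can
 * stop: either the reply matched (*status == 0) or the target reported
 * an error (*status < 0), mirroring skl_pcode_try_request().
 */
int poll_request(bool (*try_request)(void *ctx, int *status), void *ctx,
		 int timeout_base_ms)
{
	long long deadline;
	int status = 0;

	/* Stage 0: prime the target. The first request is sent eagerly
	 * because a generic wait loop may sleep before its first check. */
	if (try_request(ctx, &status))
		return status;

	/* Stage 1: relaxed poll with ~10 us naps, like _wait_for(). */
	deadline = now_ms() + timeout_base_ms;
	while (now_ms() < deadline) {
		struct timespec nap = { .tv_sec = 0, .tv_nsec = 10 * 1000 };

		if (try_request(ctx, &status))
			return status;
		nanosleep(&nap, NULL);
	}

	/* Stage 2: busy-poll for another 10 ms to maximize the request
	 * rate, standing in for preempt_disable()/wait_for_atomic(). */
	deadline = now_ms() + 10;
	while (now_ms() < deadline)
		if (try_request(ctx, &status))
			return status;

	return -ETIMEDOUT;
}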
