diff options
| author | Dave Airlie <airlied@redhat.com> | 2016-07-26 20:37:01 -0400 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2016-07-26 20:37:01 -0400 |
| commit | c3f8d8645ea56e57cbd35ea20a7655be512efaa9 (patch) | |
| tree | 79dd2b021f07cffb86289ddf5519e86954368e23 | |
| parent | 9af07af948ff3a8e20920b9279821db244d1ca69 (diff) | |
| parent | f15f6ca1e706e11fd07611bd4c7f903625349b33 (diff) | |
Merge tag 'drm-intel-next-fixes-2016-07-25' of git://anongit.freedesktop.org/drm-intel into drm-next
Bunch of fixes for the 4.8 merge pull, nothing out of the ordinary. All
suitably marked up with cc: stable where needed.
* tag 'drm-intel-next-fixes-2016-07-25' of git://anongit.freedesktop.org/drm-intel:
drm/i915/gen9: Add WaInPlaceDecompressionHang
drm/i915/guc: Revert "drm/i915/guc: enable GuC loading & submission by default"
drm/i915/bxt: Fix inadvertent CPU snooping due to incorrect MOCS config
drm/i915/gen9: Clean up MOCS table definitions
drm/i915: Set legacy properties when using legacy gamma set IOCTL. (v2)
drm/i915: Enable polling when we don't have hpd
drm/i915/vlv: Disable HPD in valleyview_crt_detect_hotplug()
drm/i915/vlv: Reset the ADPA in vlv_display_power_well_init()
drm/i915/vlv: Make intel_crt_reset() per-encoder
drm/i915: Unbreak interrupts on pre-gen6
drm/i915/breadcrumbs: Queue hangcheck before sleeping
| -rw-r--r-- | drivers/gpu/drm/i915/i915_drv.c | 3 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_drv.h | 7 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_gem.c | 9 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_params.c | 8 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/i915_reg.h | 3 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_breadcrumbs.c | 9 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_crt.c | 28 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_display.c | 44 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_drv.h | 4 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_hotplug.c | 117 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_mocs.c | 88 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_ringbuffer.c | 17 | ||||
| -rw-r--r-- | drivers/gpu/drm/i915/intel_runtime_pm.c | 9 |
13 files changed, 286 insertions, 60 deletions
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index b9a811750ca8..95ddd56b89f0 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
| @@ -2413,6 +2413,9 @@ static int intel_runtime_suspend(struct device *device) | |||
| 2413 | 2413 | ||
| 2414 | assert_forcewakes_inactive(dev_priv); | 2414 | assert_forcewakes_inactive(dev_priv); |
| 2415 | 2415 | ||
| 2416 | if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv)) | ||
| 2417 | intel_hpd_poll_init(dev_priv); | ||
| 2418 | |||
| 2416 | DRM_DEBUG_KMS("Device suspended\n"); | 2419 | DRM_DEBUG_KMS("Device suspended\n"); |
| 2417 | return 0; | 2420 | return 0; |
| 2418 | } | 2421 | } |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 03e1bfaa5a41..915a3d0acff3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
| @@ -284,6 +284,9 @@ struct i915_hotplug { | |||
| 284 | u32 short_port_mask; | 284 | u32 short_port_mask; |
| 285 | struct work_struct dig_port_work; | 285 | struct work_struct dig_port_work; |
| 286 | 286 | ||
| 287 | struct work_struct poll_init_work; | ||
| 288 | bool poll_enabled; | ||
| 289 | |||
| 287 | /* | 290 | /* |
| 288 | * if we get a HPD irq from DP and a HPD irq from non-DP | 291 | * if we get a HPD irq from DP and a HPD irq from non-DP |
| 289 | * the non-DP HPD could block the workqueue on a mode config | 292 | * the non-DP HPD could block the workqueue on a mode config |
| @@ -2743,6 +2746,8 @@ struct drm_i915_cmd_table { | |||
| 2743 | #define SKL_REVID_D0 0x3 | 2746 | #define SKL_REVID_D0 0x3 |
| 2744 | #define SKL_REVID_E0 0x4 | 2747 | #define SKL_REVID_E0 0x4 |
| 2745 | #define SKL_REVID_F0 0x5 | 2748 | #define SKL_REVID_F0 0x5 |
| 2749 | #define SKL_REVID_G0 0x6 | ||
| 2750 | #define SKL_REVID_H0 0x7 | ||
| 2746 | 2751 | ||
| 2747 | #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) | 2752 | #define IS_SKL_REVID(p, since, until) (IS_SKYLAKE(p) && IS_REVID(p, since, until)) |
| 2748 | 2753 | ||
| @@ -2957,6 +2962,8 @@ void intel_hpd_init(struct drm_i915_private *dev_priv); | |||
| 2957 | void intel_hpd_init_work(struct drm_i915_private *dev_priv); | 2962 | void intel_hpd_init_work(struct drm_i915_private *dev_priv); |
| 2958 | void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); | 2963 | void intel_hpd_cancel_work(struct drm_i915_private *dev_priv); |
| 2959 | bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); | 2964 | bool intel_hpd_pin_to_port(enum hpd_pin pin, enum port *port); |
| 2965 | bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin); | ||
| 2966 | void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin); | ||
| 2960 | 2967 | ||
| 2961 | /* i915_irq.c */ | 2968 | /* i915_irq.c */ |
| 2962 | static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) | 2969 | static inline void i915_queue_hangcheck(struct drm_i915_private *dev_priv) |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 8f50919ba9b4..7fd44980798f 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
| @@ -1501,15 +1501,6 @@ int __i915_wait_request(struct drm_i915_gem_request *req, | |||
| 1501 | break; | 1501 | break; |
| 1502 | } | 1502 | } |
| 1503 | 1503 | ||
| 1504 | /* Ensure that even if the GPU hangs, we get woken up. | ||
| 1505 | * | ||
| 1506 | * However, note that if no one is waiting, we never notice | ||
| 1507 | * a gpu hang. Eventually, we will have to wait for a resource | ||
| 1508 | * held by the GPU and so trigger a hangcheck. In the most | ||
| 1509 | * pathological case, this will be upon memory starvation! | ||
| 1510 | */ | ||
| 1511 | i915_queue_hangcheck(req->i915); | ||
| 1512 | |||
| 1513 | timeout_remain = io_schedule_timeout(timeout_remain); | 1504 | timeout_remain = io_schedule_timeout(timeout_remain); |
| 1514 | if (timeout_remain == 0) { | 1505 | if (timeout_remain == 0) { |
| 1515 | ret = -ETIME; | 1506 | ret = -ETIME; |
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c index 8b13bfa47fba..b6e404c91eed 100644 --- a/drivers/gpu/drm/i915/i915_params.c +++ b/drivers/gpu/drm/i915/i915_params.c | |||
| @@ -54,8 +54,8 @@ struct i915_params i915 __read_mostly = { | |||
| 54 | .verbose_state_checks = 1, | 54 | .verbose_state_checks = 1, |
| 55 | .nuclear_pageflip = 0, | 55 | .nuclear_pageflip = 0, |
| 56 | .edp_vswing = 0, | 56 | .edp_vswing = 0, |
| 57 | .enable_guc_loading = -1, | 57 | .enable_guc_loading = 0, |
| 58 | .enable_guc_submission = -1, | 58 | .enable_guc_submission = 0, |
| 59 | .guc_log_level = -1, | 59 | .guc_log_level = -1, |
| 60 | .enable_dp_mst = true, | 60 | .enable_dp_mst = true, |
| 61 | .inject_load_failure = 0, | 61 | .inject_load_failure = 0, |
| @@ -203,12 +203,12 @@ MODULE_PARM_DESC(edp_vswing, | |||
| 203 | module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); | 203 | module_param_named_unsafe(enable_guc_loading, i915.enable_guc_loading, int, 0400); |
| 204 | MODULE_PARM_DESC(enable_guc_loading, | 204 | MODULE_PARM_DESC(enable_guc_loading, |
| 205 | "Enable GuC firmware loading " | 205 | "Enable GuC firmware loading " |
| 206 | "(-1=auto [default], 0=never, 1=if available, 2=required)"); | 206 | "(-1=auto, 0=never [default], 1=if available, 2=required)"); |
| 207 | 207 | ||
| 208 | module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); | 208 | module_param_named_unsafe(enable_guc_submission, i915.enable_guc_submission, int, 0400); |
| 209 | MODULE_PARM_DESC(enable_guc_submission, | 209 | MODULE_PARM_DESC(enable_guc_submission, |
| 210 | "Enable GuC submission " | 210 | "Enable GuC submission " |
| 211 | "(-1=auto [default], 0=never, 1=if available, 2=required)"); | 211 | "(-1=auto, 0=never [default], 1=if available, 2=required)"); |
| 212 | 212 | ||
| 213 | module_param_named(guc_log_level, i915.guc_log_level, int, 0400); | 213 | module_param_named(guc_log_level, i915.guc_log_level, int, 0400); |
| 214 | MODULE_PARM_DESC(guc_log_level, | 214 | MODULE_PARM_DESC(guc_log_level, |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8bfde75789f6..ce14fe09d962 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
| @@ -1686,6 +1686,9 @@ enum skl_disp_power_wells { | |||
| 1686 | 1686 | ||
| 1687 | #define GEN7_TLB_RD_ADDR _MMIO(0x4700) | 1687 | #define GEN7_TLB_RD_ADDR _MMIO(0x4700) |
| 1688 | 1688 | ||
| 1689 | #define GEN9_GAMT_ECO_REG_RW_IA _MMIO(0x4ab0) | ||
| 1690 | #define GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS (1<<18) | ||
| 1691 | |||
| 1689 | #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) | 1692 | #define GAMT_CHKN_BIT_REG _MMIO(0x4ab8) |
| 1690 | #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) | 1693 | #define GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING (1<<28) |
| 1691 | 1694 | ||
diff --git a/drivers/gpu/drm/i915/intel_breadcrumbs.c b/drivers/gpu/drm/i915/intel_breadcrumbs.c index d89b2c963618..b074f3d6d127 100644 --- a/drivers/gpu/drm/i915/intel_breadcrumbs.c +++ b/drivers/gpu/drm/i915/intel_breadcrumbs.c | |||
| @@ -93,6 +93,15 @@ static void __intel_breadcrumbs_enable_irq(struct intel_breadcrumbs *b) | |||
| 93 | if (!b->irq_enabled || | 93 | if (!b->irq_enabled || |
| 94 | test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) | 94 | test_bit(engine->id, &i915->gpu_error.missed_irq_rings)) |
| 95 | mod_timer(&b->fake_irq, jiffies + 1); | 95 | mod_timer(&b->fake_irq, jiffies + 1); |
| 96 | |||
| 97 | /* Ensure that even if the GPU hangs, we get woken up. | ||
| 98 | * | ||
| 99 | * However, note that if no one is waiting, we never notice | ||
| 100 | * a gpu hang. Eventually, we will have to wait for a resource | ||
| 101 | * held by the GPU and so trigger a hangcheck. In the most | ||
| 102 | * pathological case, this will be upon memory starvation! | ||
| 103 | */ | ||
| 104 | i915_queue_hangcheck(i915); | ||
| 96 | } | 105 | } |
| 97 | 106 | ||
| 98 | static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b) | 107 | static void __intel_breadcrumbs_disable_irq(struct intel_breadcrumbs *b) |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 5819d524d917..827b6ef4e9ae 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
| @@ -329,10 +329,25 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) | |||
| 329 | struct drm_device *dev = connector->dev; | 329 | struct drm_device *dev = connector->dev; |
| 330 | struct intel_crt *crt = intel_attached_crt(connector); | 330 | struct intel_crt *crt = intel_attached_crt(connector); |
| 331 | struct drm_i915_private *dev_priv = to_i915(dev); | 331 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 332 | bool reenable_hpd; | ||
| 332 | u32 adpa; | 333 | u32 adpa; |
| 333 | bool ret; | 334 | bool ret; |
| 334 | u32 save_adpa; | 335 | u32 save_adpa; |
| 335 | 336 | ||
| 337 | /* | ||
| 338 | * Doing a force trigger causes a hpd interrupt to get sent, which can | ||
| 339 | * get us stuck in a loop if we're polling: | ||
| 340 | * - We enable power wells and reset the ADPA | ||
| 341 | * - output_poll_exec does force probe on VGA, triggering a hpd | ||
| 342 | * - HPD handler waits for poll to unlock dev->mode_config.mutex | ||
| 343 | * - output_poll_exec shuts off the ADPA, unlocks | ||
| 344 | * dev->mode_config.mutex | ||
| 345 | * - HPD handler runs, resets ADPA and brings us back to the start | ||
| 346 | * | ||
| 347 | * Just disable HPD interrupts here to prevent this | ||
| 348 | */ | ||
| 349 | reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin); | ||
| 350 | |||
| 336 | save_adpa = adpa = I915_READ(crt->adpa_reg); | 351 | save_adpa = adpa = I915_READ(crt->adpa_reg); |
| 337 | DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); | 352 | DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa); |
| 338 | 353 | ||
| @@ -357,6 +372,9 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector) | |||
| 357 | 372 | ||
| 358 | DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); | 373 | DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret); |
| 359 | 374 | ||
| 375 | if (reenable_hpd) | ||
| 376 | intel_hpd_enable(dev_priv, crt->base.hpd_pin); | ||
| 377 | |||
| 360 | return ret; | 378 | return ret; |
| 361 | } | 379 | } |
| 362 | 380 | ||
| @@ -717,11 +735,11 @@ static int intel_crt_set_property(struct drm_connector *connector, | |||
| 717 | return 0; | 735 | return 0; |
| 718 | } | 736 | } |
| 719 | 737 | ||
| 720 | static void intel_crt_reset(struct drm_connector *connector) | 738 | void intel_crt_reset(struct drm_encoder *encoder) |
| 721 | { | 739 | { |
| 722 | struct drm_device *dev = connector->dev; | 740 | struct drm_device *dev = encoder->dev; |
| 723 | struct drm_i915_private *dev_priv = to_i915(dev); | 741 | struct drm_i915_private *dev_priv = to_i915(dev); |
| 724 | struct intel_crt *crt = intel_attached_crt(connector); | 742 | struct intel_crt *crt = intel_encoder_to_crt(to_intel_encoder(encoder)); |
| 725 | 743 | ||
| 726 | if (INTEL_INFO(dev)->gen >= 5) { | 744 | if (INTEL_INFO(dev)->gen >= 5) { |
| 727 | u32 adpa; | 745 | u32 adpa; |
| @@ -743,7 +761,6 @@ static void intel_crt_reset(struct drm_connector *connector) | |||
| 743 | */ | 761 | */ |
| 744 | 762 | ||
| 745 | static const struct drm_connector_funcs intel_crt_connector_funcs = { | 763 | static const struct drm_connector_funcs intel_crt_connector_funcs = { |
| 746 | .reset = intel_crt_reset, | ||
| 747 | .dpms = drm_atomic_helper_connector_dpms, | 764 | .dpms = drm_atomic_helper_connector_dpms, |
| 748 | .detect = intel_crt_detect, | 765 | .detect = intel_crt_detect, |
| 749 | .fill_modes = drm_helper_probe_single_connector_modes, | 766 | .fill_modes = drm_helper_probe_single_connector_modes, |
| @@ -762,6 +779,7 @@ static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs | |||
| 762 | }; | 779 | }; |
| 763 | 780 | ||
| 764 | static const struct drm_encoder_funcs intel_crt_enc_funcs = { | 781 | static const struct drm_encoder_funcs intel_crt_enc_funcs = { |
| 782 | .reset = intel_crt_reset, | ||
| 765 | .destroy = intel_encoder_destroy, | 783 | .destroy = intel_encoder_destroy, |
| 766 | }; | 784 | }; |
| 767 | 785 | ||
| @@ -904,5 +922,5 @@ void intel_crt_init(struct drm_device *dev) | |||
| 904 | dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config; | 922 | dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config; |
| 905 | } | 923 | } |
| 906 | 924 | ||
| 907 | intel_crt_reset(connector); | 925 | intel_crt_reset(&crt->base.base); |
| 908 | } | 926 | } |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 70555c526f6c..c457eed76f1f 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
| @@ -13924,8 +13924,50 @@ out: | |||
| 13924 | 13924 | ||
| 13925 | #undef for_each_intel_crtc_masked | 13925 | #undef for_each_intel_crtc_masked |
| 13926 | 13926 | ||
| 13927 | /* | ||
| 13928 | * FIXME: Remove this once i915 is fully DRIVER_ATOMIC by calling | ||
| 13929 | * drm_atomic_helper_legacy_gamma_set() directly. | ||
| 13930 | */ | ||
| 13931 | static int intel_atomic_legacy_gamma_set(struct drm_crtc *crtc, | ||
| 13932 | u16 *red, u16 *green, u16 *blue, | ||
| 13933 | uint32_t size) | ||
| 13934 | { | ||
| 13935 | struct drm_device *dev = crtc->dev; | ||
| 13936 | struct drm_mode_config *config = &dev->mode_config; | ||
| 13937 | struct drm_crtc_state *state; | ||
| 13938 | int ret; | ||
| 13939 | |||
| 13940 | ret = drm_atomic_helper_legacy_gamma_set(crtc, red, green, blue, size); | ||
| 13941 | if (ret) | ||
| 13942 | return ret; | ||
| 13943 | |||
| 13944 | /* | ||
| 13945 | * Make sure we update the legacy properties so this works when | ||
| 13946 | * atomic is not enabled. | ||
| 13947 | */ | ||
| 13948 | |||
| 13949 | state = crtc->state; | ||
| 13950 | |||
| 13951 | drm_object_property_set_value(&crtc->base, | ||
| 13952 | config->degamma_lut_property, | ||
| 13953 | (state->degamma_lut) ? | ||
| 13954 | state->degamma_lut->base.id : 0); | ||
| 13955 | |||
| 13956 | drm_object_property_set_value(&crtc->base, | ||
| 13957 | config->ctm_property, | ||
| 13958 | (state->ctm) ? | ||
| 13959 | state->ctm->base.id : 0); | ||
| 13960 | |||
| 13961 | drm_object_property_set_value(&crtc->base, | ||
| 13962 | config->gamma_lut_property, | ||
| 13963 | (state->gamma_lut) ? | ||
| 13964 | state->gamma_lut->base.id : 0); | ||
| 13965 | |||
| 13966 | return 0; | ||
| 13967 | } | ||
| 13968 | |||
| 13927 | static const struct drm_crtc_funcs intel_crtc_funcs = { | 13969 | static const struct drm_crtc_funcs intel_crtc_funcs = { |
| 13928 | .gamma_set = drm_atomic_helper_legacy_gamma_set, | 13970 | .gamma_set = intel_atomic_legacy_gamma_set, |
| 13929 | .set_config = drm_atomic_helper_set_config, | 13971 | .set_config = drm_atomic_helper_set_config, |
| 13930 | .set_property = drm_atomic_helper_crtc_set_property, | 13972 | .set_property = drm_atomic_helper_crtc_set_property, |
| 13931 | .destroy = intel_crtc_destroy, | 13973 | .destroy = intel_crtc_destroy, |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 55aeaf041749..3329fc6a95f4 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
| @@ -1102,7 +1102,7 @@ void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv, | |||
| 1102 | 1102 | ||
| 1103 | /* intel_crt.c */ | 1103 | /* intel_crt.c */ |
| 1104 | void intel_crt_init(struct drm_device *dev); | 1104 | void intel_crt_init(struct drm_device *dev); |
| 1105 | 1105 | void intel_crt_reset(struct drm_encoder *encoder); | |
| 1106 | 1106 | ||
| 1107 | /* intel_ddi.c */ | 1107 | /* intel_ddi.c */ |
| 1108 | void intel_ddi_clk_select(struct intel_encoder *encoder, | 1108 | void intel_ddi_clk_select(struct intel_encoder *encoder, |
| @@ -1425,6 +1425,8 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector); | |||
| 1425 | 1425 | ||
| 1426 | /* intel_dvo.c */ | 1426 | /* intel_dvo.c */ |
| 1427 | void intel_dvo_init(struct drm_device *dev); | 1427 | void intel_dvo_init(struct drm_device *dev); |
| 1428 | /* intel_hotplug.c */ | ||
| 1429 | void intel_hpd_poll_init(struct drm_i915_private *dev_priv); | ||
| 1428 | 1430 | ||
| 1429 | 1431 | ||
| 1430 | /* legacy fbdev emulation in intel_fbdev.c */ | 1432 | /* legacy fbdev emulation in intel_fbdev.c */ |
diff --git a/drivers/gpu/drm/i915/intel_hotplug.c b/drivers/gpu/drm/i915/intel_hotplug.c index 51434ec871f2..f48957ea100d 100644 --- a/drivers/gpu/drm/i915/intel_hotplug.c +++ b/drivers/gpu/drm/i915/intel_hotplug.c | |||
| @@ -452,20 +452,47 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv, | |||
| 452 | * | 452 | * |
| 453 | * This is a separate step from interrupt enabling to simplify the locking rules | 453 | * This is a separate step from interrupt enabling to simplify the locking rules |
| 454 | * in the driver load and resume code. | 454 | * in the driver load and resume code. |
| 455 | * | ||
| 456 | * Also see: intel_hpd_poll_init(), which enables connector polling | ||
| 455 | */ | 457 | */ |
| 456 | void intel_hpd_init(struct drm_i915_private *dev_priv) | 458 | void intel_hpd_init(struct drm_i915_private *dev_priv) |
| 457 | { | 459 | { |
| 458 | struct drm_device *dev = &dev_priv->drm; | ||
| 459 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 460 | struct drm_connector *connector; | ||
| 461 | int i; | 460 | int i; |
| 462 | 461 | ||
| 463 | for_each_hpd_pin(i) { | 462 | for_each_hpd_pin(i) { |
| 464 | dev_priv->hotplug.stats[i].count = 0; | 463 | dev_priv->hotplug.stats[i].count = 0; |
| 465 | dev_priv->hotplug.stats[i].state = HPD_ENABLED; | 464 | dev_priv->hotplug.stats[i].state = HPD_ENABLED; |
| 466 | } | 465 | } |
| 466 | |||
| 467 | WRITE_ONCE(dev_priv->hotplug.poll_enabled, false); | ||
| 468 | schedule_work(&dev_priv->hotplug.poll_init_work); | ||
| 469 | |||
| 470 | /* | ||
| 471 | * Interrupt setup is already guaranteed to be single-threaded, this is | ||
| 472 | * just to make the assert_spin_locked checks happy. | ||
| 473 | */ | ||
| 474 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 475 | if (dev_priv->display.hpd_irq_setup) | ||
| 476 | dev_priv->display.hpd_irq_setup(dev_priv); | ||
| 477 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 478 | } | ||
| 479 | |||
| 480 | void i915_hpd_poll_init_work(struct work_struct *work) { | ||
| 481 | struct drm_i915_private *dev_priv = | ||
| 482 | container_of(work, struct drm_i915_private, | ||
| 483 | hotplug.poll_init_work); | ||
| 484 | struct drm_device *dev = &dev_priv->drm; | ||
| 485 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
| 486 | struct drm_connector *connector; | ||
| 487 | bool enabled; | ||
| 488 | |||
| 489 | mutex_lock(&dev->mode_config.mutex); | ||
| 490 | |||
| 491 | enabled = READ_ONCE(dev_priv->hotplug.poll_enabled); | ||
| 492 | |||
| 467 | list_for_each_entry(connector, &mode_config->connector_list, head) { | 493 | list_for_each_entry(connector, &mode_config->connector_list, head) { |
| 468 | struct intel_connector *intel_connector = to_intel_connector(connector); | 494 | struct intel_connector *intel_connector = |
| 495 | to_intel_connector(connector); | ||
| 469 | connector->polled = intel_connector->polled; | 496 | connector->polled = intel_connector->polled; |
| 470 | 497 | ||
| 471 | /* MST has a dynamic intel_connector->encoder and it's reprobing | 498 | /* MST has a dynamic intel_connector->encoder and it's reprobing |
| @@ -474,24 +501,62 @@ void intel_hpd_init(struct drm_i915_private *dev_priv) | |||
| 474 | continue; | 501 | continue; |
| 475 | 502 | ||
| 476 | if (!connector->polled && I915_HAS_HOTPLUG(dev) && | 503 | if (!connector->polled && I915_HAS_HOTPLUG(dev) && |
| 477 | intel_connector->encoder->hpd_pin > HPD_NONE) | 504 | intel_connector->encoder->hpd_pin > HPD_NONE) { |
| 478 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 505 | connector->polled = enabled ? |
| 506 | DRM_CONNECTOR_POLL_CONNECT | | ||
| 507 | DRM_CONNECTOR_POLL_DISCONNECT : | ||
| 508 | DRM_CONNECTOR_POLL_HPD; | ||
| 509 | } | ||
| 479 | } | 510 | } |
| 480 | 511 | ||
| 512 | if (enabled) | ||
| 513 | drm_kms_helper_poll_enable_locked(dev); | ||
| 514 | |||
| 515 | mutex_unlock(&dev->mode_config.mutex); | ||
| 516 | |||
| 481 | /* | 517 | /* |
| 482 | * Interrupt setup is already guaranteed to be single-threaded, this is | 518 | * We might have missed any hotplugs that happened while we were |
| 483 | * just to make the assert_spin_locked checks happy. | 519 | * in the middle of disabling polling |
| 484 | */ | 520 | */ |
| 485 | spin_lock_irq(&dev_priv->irq_lock); | 521 | if (!enabled) |
| 486 | if (dev_priv->display.hpd_irq_setup) | 522 | drm_helper_hpd_irq_event(dev); |
| 487 | dev_priv->display.hpd_irq_setup(dev_priv); | 523 | } |
| 488 | spin_unlock_irq(&dev_priv->irq_lock); | 524 | |
| 525 | /** | ||
| 526 | * intel_hpd_poll_init - enables/disables polling for connectors with hpd | ||
| 527 | * @dev_priv: i915 device instance | ||
| 528 | * @enabled: Whether to enable or disable polling | ||
| 529 | * | ||
| 530 | * This function enables polling for all connectors, regardless of whether or | ||
| 531 | * not they support hotplug detection. Under certain conditions HPD may not be | ||
| 532 | * functional. On most Intel GPUs, this happens when we enter runtime suspend. | ||
| 533 | * On Valleyview and Cherryview systems, this also happens when we shut off all | ||
| 534 | * of the powerwells. | ||
| 535 | * | ||
| 536 | * Since this function can get called in contexts where we're already holding | ||
| 537 | * dev->mode_config.mutex, we do the actual hotplug enabling in a separate | ||
| 538 | * worker. | ||
| 539 | * | ||
| 540 | * Also see: intel_hpd_init(), which restores hpd handling. | ||
| 541 | */ | ||
| 542 | void intel_hpd_poll_init(struct drm_i915_private *dev_priv) | ||
| 543 | { | ||
| 544 | WRITE_ONCE(dev_priv->hotplug.poll_enabled, true); | ||
| 545 | |||
| 546 | /* | ||
| 547 | * We might already be holding dev->mode_config.mutex, so do this in a | ||
| 548 | * separate worker | ||
| 549 | * As well, there's no issue if we race here since we always reschedule | ||
| 550 | * this worker anyway | ||
| 551 | */ | ||
| 552 | schedule_work(&dev_priv->hotplug.poll_init_work); | ||
| 489 | } | 553 | } |
| 490 | 554 | ||
| 491 | void intel_hpd_init_work(struct drm_i915_private *dev_priv) | 555 | void intel_hpd_init_work(struct drm_i915_private *dev_priv) |
| 492 | { | 556 | { |
| 493 | INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); | 557 | INIT_WORK(&dev_priv->hotplug.hotplug_work, i915_hotplug_work_func); |
| 494 | INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); | 558 | INIT_WORK(&dev_priv->hotplug.dig_port_work, i915_digport_work_func); |
| 559 | INIT_WORK(&dev_priv->hotplug.poll_init_work, i915_hpd_poll_init_work); | ||
| 495 | INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, | 560 | INIT_DELAYED_WORK(&dev_priv->hotplug.reenable_work, |
| 496 | intel_hpd_irq_storm_reenable_work); | 561 | intel_hpd_irq_storm_reenable_work); |
| 497 | } | 562 | } |
| @@ -508,5 +573,33 @@ void intel_hpd_cancel_work(struct drm_i915_private *dev_priv) | |||
| 508 | 573 | ||
| 509 | cancel_work_sync(&dev_priv->hotplug.dig_port_work); | 574 | cancel_work_sync(&dev_priv->hotplug.dig_port_work); |
| 510 | cancel_work_sync(&dev_priv->hotplug.hotplug_work); | 575 | cancel_work_sync(&dev_priv->hotplug.hotplug_work); |
| 576 | cancel_work_sync(&dev_priv->hotplug.poll_init_work); | ||
| 511 | cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); | 577 | cancel_delayed_work_sync(&dev_priv->hotplug.reenable_work); |
| 512 | } | 578 | } |
| 579 | |||
| 580 | bool intel_hpd_disable(struct drm_i915_private *dev_priv, enum hpd_pin pin) | ||
| 581 | { | ||
| 582 | bool ret = false; | ||
| 583 | |||
| 584 | if (pin == HPD_NONE) | ||
| 585 | return false; | ||
| 586 | |||
| 587 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 588 | if (dev_priv->hotplug.stats[pin].state == HPD_ENABLED) { | ||
| 589 | dev_priv->hotplug.stats[pin].state = HPD_DISABLED; | ||
| 590 | ret = true; | ||
| 591 | } | ||
| 592 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 593 | |||
| 594 | return ret; | ||
| 595 | } | ||
| 596 | |||
| 597 | void intel_hpd_enable(struct drm_i915_private *dev_priv, enum hpd_pin pin) | ||
| 598 | { | ||
| 599 | if (pin == HPD_NONE) | ||
| 600 | return; | ||
| 601 | |||
| 602 | spin_lock_irq(&dev_priv->irq_lock); | ||
| 603 | dev_priv->hotplug.stats[pin].state = HPD_ENABLED; | ||
| 604 | spin_unlock_irq(&dev_priv->irq_lock); | ||
| 605 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_mocs.c b/drivers/gpu/drm/i915/intel_mocs.c index 3c1482b8f2f4..927825f5b284 100644 --- a/drivers/gpu/drm/i915/intel_mocs.c +++ b/drivers/gpu/drm/i915/intel_mocs.c | |||
| @@ -66,9 +66,10 @@ struct drm_i915_mocs_table { | |||
| 66 | #define L3_WB 3 | 66 | #define L3_WB 3 |
| 67 | 67 | ||
| 68 | /* Target cache */ | 68 | /* Target cache */ |
| 69 | #define ELLC 0 | 69 | #define LE_TC_PAGETABLE 0 |
| 70 | #define LLC 1 | 70 | #define LE_TC_LLC 1 |
| 71 | #define LLC_ELLC 2 | 71 | #define LE_TC_LLC_ELLC 2 |
| 72 | #define LE_TC_LLC_ELLC_ALT 3 | ||
| 72 | 73 | ||
| 73 | /* | 74 | /* |
| 74 | * MOCS tables | 75 | * MOCS tables |
| @@ -96,34 +97,67 @@ struct drm_i915_mocs_table { | |||
| 96 | * end. | 97 | * end. |
| 97 | */ | 98 | */ |
| 98 | static const struct drm_i915_mocs_entry skylake_mocs_table[] = { | 99 | static const struct drm_i915_mocs_entry skylake_mocs_table[] = { |
| 99 | /* { 0x00000009, 0x0010 } */ | 100 | { /* 0x00000009 */ |
| 100 | { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) | | 101 | .control_value = LE_CACHEABILITY(LE_UC) | |
| 101 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | 102 | LE_TGT_CACHE(LE_TC_LLC_ELLC) | |
| 102 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) }, | 103 | LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | |
| 103 | /* { 0x00000038, 0x0030 } */ | 104 | LE_PFM(0) | LE_SCF(0), |
| 104 | { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | | 105 | |
| 105 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | 106 | /* 0x0010 */ |
| 106 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }, | 107 | .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC), |
| 107 | /* { 0x0000003b, 0x0030 } */ | 108 | }, |
| 108 | { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | | 109 | { |
| 109 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | 110 | /* 0x00000038 */ |
| 110 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) } | 111 | .control_value = LE_CACHEABILITY(LE_PAGETABLE) | |
| 112 | LE_TGT_CACHE(LE_TC_LLC_ELLC) | | ||
| 113 | LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | | ||
| 114 | LE_PFM(0) | LE_SCF(0), | ||
| 115 | /* 0x0030 */ | ||
| 116 | .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), | ||
| 117 | }, | ||
| 118 | { | ||
| 119 | /* 0x0000003b */ | ||
| 120 | .control_value = LE_CACHEABILITY(LE_WB) | | ||
| 121 | LE_TGT_CACHE(LE_TC_LLC_ELLC) | | ||
| 122 | LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | | ||
| 123 | LE_PFM(0) | LE_SCF(0), | ||
| 124 | /* 0x0030 */ | ||
| 125 | .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), | ||
| 126 | }, | ||
| 111 | }; | 127 | }; |
| 112 | 128 | ||
| 113 | /* NOTE: the LE_TGT_CACHE is not used on Broxton */ | 129 | /* NOTE: the LE_TGT_CACHE is not used on Broxton */ |
| 114 | static const struct drm_i915_mocs_entry broxton_mocs_table[] = { | 130 | static const struct drm_i915_mocs_entry broxton_mocs_table[] = { |
| 115 | /* { 0x00000009, 0x0010 } */ | 131 | { |
| 116 | { (LE_CACHEABILITY(LE_UC) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(0) | | 132 | /* 0x00000009 */ |
| 117 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | 133 | .control_value = LE_CACHEABILITY(LE_UC) | |
| 118 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC)) }, | 134 | LE_TGT_CACHE(LE_TC_LLC_ELLC) | |
| 119 | /* { 0x00000038, 0x0030 } */ | 135 | LE_LRUM(0) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | |
| 120 | { (LE_CACHEABILITY(LE_PAGETABLE) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | | 136 | LE_PFM(0) | LE_SCF(0), |
| 121 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | 137 | |
| 122 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) }, | 138 | /* 0x0010 */ |
| 123 | /* { 0x0000003b, 0x0030 } */ | 139 | .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_UC), |
| 124 | { (LE_CACHEABILITY(LE_WB) | LE_TGT_CACHE(LLC_ELLC) | LE_LRUM(3) | | 140 | }, |
| 125 | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | LE_PFM(0) | LE_SCF(0)), | 141 | { |
| 126 | (L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB)) } | 142 | /* 0x00000038 */ |
| 143 | .control_value = LE_CACHEABILITY(LE_PAGETABLE) | | ||
| 144 | LE_TGT_CACHE(LE_TC_LLC_ELLC) | | ||
| 145 | LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | | ||
| 146 | LE_PFM(0) | LE_SCF(0), | ||
| 147 | |||
| 148 | /* 0x0030 */ | ||
| 149 | .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), | ||
| 150 | }, | ||
| 151 | { | ||
| 152 | /* 0x00000039 */ | ||
| 153 | .control_value = LE_CACHEABILITY(LE_UC) | | ||
| 154 | LE_TGT_CACHE(LE_TC_LLC_ELLC) | | ||
| 155 | LE_LRUM(3) | LE_AOM(0) | LE_RSC(0) | LE_SCC(0) | | ||
| 156 | LE_PFM(0) | LE_SCF(0), | ||
| 157 | |||
| 158 | /* 0x0030 */ | ||
| 159 | .l3cc_value = L3_ESC(0) | L3_SCC(0) | L3_CACHEABILITY(L3_WB), | ||
| 160 | }, | ||
| 127 | }; | 161 | }; |
| 128 | 162 | ||
| 129 | /** | 163 | /** |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 61e00bf9e87f..cca7792f26d5 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
| @@ -1109,6 +1109,11 @@ static int skl_init_workarounds(struct intel_engine_cs *engine) | |||
| 1109 | /* WaDisableGafsUnitClkGating:skl */ | 1109 | /* WaDisableGafsUnitClkGating:skl */ |
| 1110 | WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); | 1110 | WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE); |
| 1111 | 1111 | ||
| 1112 | /* WaInPlaceDecompressionHang:skl */ | ||
| 1113 | if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER)) | ||
| 1114 | WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, | ||
| 1115 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | ||
| 1116 | |||
| 1112 | /* WaDisableLSQCROPERFforOCL:skl */ | 1117 | /* WaDisableLSQCROPERFforOCL:skl */ |
| 1113 | ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); | 1118 | ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); |
| 1114 | if (ret) | 1119 | if (ret) |
| @@ -1178,6 +1183,11 @@ static int bxt_init_workarounds(struct intel_engine_cs *engine) | |||
| 1178 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, | 1183 | WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2, |
| 1179 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); | 1184 | GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION); |
| 1180 | 1185 | ||
| 1186 | /* WaInPlaceDecompressionHang:bxt */ | ||
| 1187 | if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER)) | ||
| 1188 | WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, | ||
| 1189 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | ||
| 1190 | |||
| 1181 | return 0; | 1191 | return 0; |
| 1182 | } | 1192 | } |
| 1183 | 1193 | ||
| @@ -1225,6 +1235,10 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine) | |||
| 1225 | GEN7_HALF_SLICE_CHICKEN1, | 1235 | GEN7_HALF_SLICE_CHICKEN1, |
| 1226 | GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); | 1236 | GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE); |
| 1227 | 1237 | ||
| 1238 | /* WaInPlaceDecompressionHang:kbl */ | ||
| 1239 | WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA, | ||
| 1240 | GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS); | ||
| 1241 | |||
| 1228 | /* WaDisableLSQCROPERFforOCL:kbl */ | 1242 | /* WaDisableLSQCROPERFforOCL:kbl */ |
| 1229 | ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); | 1243 | ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4); |
| 1230 | if (ret) | 1244 | if (ret) |
| @@ -1305,7 +1319,8 @@ static int init_render_ring(struct intel_engine_cs *engine) | |||
| 1305 | if (IS_GEN(dev_priv, 6, 7)) | 1319 | if (IS_GEN(dev_priv, 6, 7)) |
| 1306 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); | 1320 | I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING)); |
| 1307 | 1321 | ||
| 1308 | I915_WRITE_IMR(engine, ~engine->irq_keep_mask); | 1322 | if (INTEL_INFO(dev_priv)->gen >= 6) |
| 1323 | I915_WRITE_IMR(engine, ~engine->irq_keep_mask); | ||
| 1309 | 1324 | ||
| 1310 | return init_workarounds_ring(engine); | 1325 | return init_workarounds_ring(engine); |
| 1311 | } | 1326 | } |
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6b78295f53db..1c603bbe5784 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c | |||
| @@ -1078,6 +1078,7 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv) | |||
| 1078 | 1078 | ||
| 1079 | static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) | 1079 | static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) |
| 1080 | { | 1080 | { |
| 1081 | struct intel_encoder *encoder; | ||
| 1081 | enum pipe pipe; | 1082 | enum pipe pipe; |
| 1082 | 1083 | ||
| 1083 | /* | 1084 | /* |
| @@ -1113,6 +1114,12 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv) | |||
| 1113 | 1114 | ||
| 1114 | intel_hpd_init(dev_priv); | 1115 | intel_hpd_init(dev_priv); |
| 1115 | 1116 | ||
| 1117 | /* Re-enable the ADPA, if we have one */ | ||
| 1118 | for_each_intel_encoder(&dev_priv->drm, encoder) { | ||
| 1119 | if (encoder->type == INTEL_OUTPUT_ANALOG) | ||
| 1120 | intel_crt_reset(&encoder->base); | ||
| 1121 | } | ||
| 1122 | |||
| 1116 | i915_redisable_vga_power_on(&dev_priv->drm); | 1123 | i915_redisable_vga_power_on(&dev_priv->drm); |
| 1117 | } | 1124 | } |
| 1118 | 1125 | ||
| @@ -1126,6 +1133,8 @@ static void vlv_display_power_well_deinit(struct drm_i915_private *dev_priv) | |||
| 1126 | synchronize_irq(dev_priv->drm.irq); | 1133 | synchronize_irq(dev_priv->drm.irq); |
| 1127 | 1134 | ||
| 1128 | intel_power_sequencer_reset(dev_priv); | 1135 | intel_power_sequencer_reset(dev_priv); |
| 1136 | |||
| 1137 | intel_hpd_poll_init(dev_priv); | ||
| 1129 | } | 1138 | } |
| 1130 | 1139 | ||
| 1131 | static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, | 1140 | static void vlv_display_power_well_enable(struct drm_i915_private *dev_priv, |
