-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c  |   5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c      |  15
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c      |  59
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h      |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 151
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c      |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h     |   3
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c    |   7
-rw-r--r--  drivers/gpu/drm/i915/intel_uncore.c  |  79
9 files changed, 230 insertions, 93 deletions
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d4a0dddbfefb..779a275eb1fd 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -3338,6 +3338,11 @@ static int pipe_crc_set_source(struct drm_device *dev, enum pipe pipe,
 	if (pipe_crc->source && source)
 		return -EINVAL;
 
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+		DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+		return -EIO;
+	}
+
 	if (IS_GEN2(dev))
 		ret = i8xx_pipe_crc_ctl_reg(&source, &val);
 	else if (INTEL_INFO(dev)->gen < 5)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index fd17ccabd8a4..d2ba315f4c92 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2574,11 +2574,13 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 	list_del(&request->list);
 	i915_gem_request_remove_from_client(request);
 
-	if (i915.enable_execlists && ctx) {
-		struct intel_engine_cs *ring = request->ring;
+	if (ctx) {
+		if (i915.enable_execlists) {
+			struct intel_engine_cs *ring = request->ring;
 
-		if (ctx != ring->default_context)
-			intel_lr_context_unpin(ring, ctx);
+			if (ctx != ring->default_context)
+				intel_lr_context_unpin(ring, ctx);
+		}
 		i915_gem_context_unreference(ctx);
 	}
 	kfree(request);
@@ -4263,7 +4265,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	int ret;
 
-	if (INTEL_INFO(dev)->gen >= 6)
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return -ENODEV;
 
 	ret = i915_mutex_lock_interruptible(dev);
@@ -4319,6 +4321,9 @@ i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj;
 	int ret;
 
+	if (drm_core_check_feature(dev, DRIVER_MODESET))
+		return -ENODEV;
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 5908580d7c15..981834b0f9b6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -231,9 +231,6 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 
 	assert_spin_locked(&dev_priv->irq_lock);
 
-	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
-		return;
-
 	new_val = dev_priv->pm_irq_mask;
 	new_val &= ~interrupt_mask;
 	new_val |= (~enabled_irq_mask & interrupt_mask);
@@ -247,14 +244,26 @@ static void snb_update_pm_irq(struct drm_i915_private *dev_priv,
 
 void gen6_enable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
 {
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return;
+
 	snb_update_pm_irq(dev_priv, mask, mask);
 }
 
-void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+static void __gen6_disable_pm_irq(struct drm_i915_private *dev_priv,
+				  uint32_t mask)
 {
 	snb_update_pm_irq(dev_priv, mask, 0);
 }
 
+void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask)
+{
+	if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+		return;
+
+	__gen6_disable_pm_irq(dev_priv, mask);
+}
+
 void gen6_reset_rps_interrupts(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -289,16 +298,20 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
 
 	cancel_work_sync(&dev_priv->rps.work);
 
+	spin_lock_irq(&dev_priv->irq_lock);
+
 	I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
 		   ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
+
+	__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
 	I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
 				~dev_priv->pm_rps_events);
+	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
+	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
 
-	spin_lock_irq(&dev_priv->irq_lock);
 	dev_priv->rps.pm_iir = 0;
-	spin_unlock_irq(&dev_priv->irq_lock);
 
-	I915_WRITE(gen6_pm_iir(dev_priv), dev_priv->pm_rps_events);
+	spin_unlock_irq(&dev_priv->irq_lock);
 }
 
 /**
@@ -1339,10 +1352,8 @@ static void snb_gt_irq_handler(struct drm_device *dev,
 
 	if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
 		      GT_BSD_CS_ERROR_INTERRUPT |
-		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT)) {
-		i915_handle_error(dev, false, "GT error interrupt 0x%08x",
-				  gt_iir);
-	}
+		      GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+		DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
 
 	if (gt_iir & GT_PARITY_ERROR(dev))
 		ivybridge_parity_error_irq_handler(dev, gt_iir);
@@ -1623,7 +1634,7 @@ static void display_pipe_crc_irq_handler(struct drm_device *dev, enum pipe pipe,
 
 	if (!pipe_crc->entries) {
 		spin_unlock(&pipe_crc->lock);
-		DRM_ERROR("spurious interrupt\n");
+		DRM_DEBUG_KMS("spurious interrupt\n");
 		return;
 	}
 
@@ -1731,11 +1742,8 @@ static void gen6_rps_irq_handler(struct drm_i915_private *dev_priv, u32 pm_iir)
 		if (pm_iir & PM_VEBOX_USER_INTERRUPT)
 			notify_ring(dev_priv->dev, &dev_priv->ring[VECS]);
 
-		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT) {
-			i915_handle_error(dev_priv->dev, false,
-					  "VEBOX CS error interrupt 0x%08x",
-					  pm_iir);
-		}
+		if (pm_iir & PM_VEBOX_CS_ERROR_INTERRUPT)
+			DRM_DEBUG("Command parser error, pm_iir 0x%08x\n", pm_iir);
 	}
 }
 
@@ -2428,6 +2436,9 @@ static void i915_error_work_func(struct work_struct *work)
 	 * simulated reset via debugfs, so get an RPM reference.
 	 */
 	intel_runtime_pm_get(dev_priv);
+
+	intel_prepare_reset(dev);
+
 	/*
 	 * All state reset _must_ be completed before we update the
 	 * reset counter, for otherwise waiters might miss the reset
@@ -2436,7 +2447,7 @@ static void i915_error_work_func(struct work_struct *work)
 	 */
 	ret = i915_reset(dev);
 
-	intel_display_handle_reset(dev);
+	intel_finish_reset(dev);
 
 	intel_runtime_pm_put(dev_priv);
 
@@ -3746,9 +3757,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
 		 */
 		spin_lock(&dev_priv->irq_lock);
 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-			i915_handle_error(dev, false,
-					  "Command parser error, iir 0x%08x",
-					  iir);
+			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
 		for_each_pipe(dev_priv, pipe) {
 			int reg = PIPESTAT(pipe);
@@ -3929,9 +3938,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
 		 */
 		spin_lock(&dev_priv->irq_lock);
 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-			i915_handle_error(dev, false,
-					  "Command parser error, iir 0x%08x",
-					  iir);
+			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
 		for_each_pipe(dev_priv, pipe) {
 			int reg = PIPESTAT(pipe);
@@ -4154,9 +4161,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
 		 */
 		spin_lock(&dev_priv->irq_lock);
 		if (iir & I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
-			i915_handle_error(dev, false,
-					  "Command parser error, iir 0x%08x",
-					  iir);
+			DRM_DEBUG("Command parser error, iir 0x%08x\n", iir);
 
 		for_each_pipe(dev_priv, pipe) {
 			int reg = PIPESTAT(pipe);
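
Note on the PM interrupt mask handling touched above: snb_update_pm_irq() computes the new PMIMR value from three masks, and the new gen6_enable_pm_irq()/__gen6_disable_pm_irq() pair differs only in which of those bits end up set (a set PMIMR bit disables the interrupt). A minimal standalone sketch of that arithmetic, plain C rather than driver code, with a hypothetical RPS bit value:

/*
 * Sketch of the PMIMR update rule used by snb_update_pm_irq():
 * "enable" clears the selected bits, "disable" sets them.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t update_pm_mask(uint32_t cur, uint32_t interrupt_mask,
			       uint32_t enabled_irq_mask)
{
	uint32_t new_val = cur;

	new_val &= ~interrupt_mask;                    /* drop the bits being updated */
	new_val |= ~enabled_irq_mask & interrupt_mask; /* re-set those that stay masked */

	return new_val;
}

int main(void)
{
	uint32_t pmimr = 0xffffffff; /* everything masked off */
	uint32_t rps = 0x00000070;   /* hypothetical RPS event bits, not the real value */

	pmimr = update_pm_mask(pmimr, rps, rps); /* like gen6_enable_pm_irq(dev_priv, rps) */
	assert((pmimr & rps) == 0);

	pmimr = update_pm_mask(pmimr, rps, 0);   /* like __gen6_disable_pm_irq(dev_priv, rps) */
	assert((pmimr & rps) == rps);

	return 0;
}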
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3102907a96a7..544675895c8d 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,11 +78,12 @@
 
 
 /* Graphics reset regs */
-#define I965_GDRST 0xc0 /* PCI config register */
+#define I915_GDRST 0xc0 /* PCI config register */
 #define  GRDOM_FULL	(0<<2)
 #define  GRDOM_RENDER	(1<<2)
 #define  GRDOM_MEDIA	(3<<2)
 #define  GRDOM_MASK	(3<<2)
+#define  GRDOM_RESET_STATUS (1<<1)
 #define  GRDOM_RESET_ENABLE (1<<0)
 
 #define ILK_GDSR 0x2ca4 /* MCHBAR offset */
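
The GDRST PCI config register gains a status bit here: the domain select lives in bits 3:2, reset status in bit 1, and reset enable in bit 0. A small standalone sketch of how those definitions combine into the config byte (values copied from the patch; the polling logic itself is only illustrative):

/* Sketch of composing and decoding the GDRST byte defined above. */
#include <assert.h>
#include <stdint.h>

#define GRDOM_FULL		(0 << 2)
#define GRDOM_RENDER		(1 << 2)
#define GRDOM_MEDIA		(3 << 2)
#define GRDOM_MASK		(3 << 2)
#define GRDOM_RESET_STATUS	(1 << 1)
#define GRDOM_RESET_ENABLE	(1 << 0)

int main(void)
{
	/* Request a render-domain reset, g4x-style. */
	uint8_t gdrst = GRDOM_RENDER | GRDOM_RESET_ENABLE;

	assert((gdrst & GRDOM_MASK) == GRDOM_RENDER);

	/* g4x treats the reset as done once the enable bit reads back clear... */
	gdrst &= (uint8_t)~GRDOM_RESET_ENABLE;
	assert((gdrst & GRDOM_RESET_ENABLE) == 0);

	/* ...while the new i915/g33 path polls the status bit instead. */
	assert((gdrst & GRDOM_RESET_STATUS) == 0);

	return 0;
}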
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 853697fc4d4b..6289babd03b0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2765,25 +2765,10 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 	return 0;
 }
 
-void intel_display_handle_reset(struct drm_device *dev)
+static void intel_complete_page_flips(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 
-	/*
-	 * Flips in the rings have been nuked by the reset,
-	 * so complete all pending flips so that user space
-	 * will get its events and not get stuck.
-	 *
-	 * Also update the base address of all primary
-	 * planes to the the last fb to make sure we're
-	 * showing the correct fb after a reset.
-	 *
-	 * Need to make two loops over the crtcs so that we
-	 * don't try to grab a crtc mutex before the
-	 * pending_flip_queue really got woken up.
-	 */
-
 	for_each_crtc(dev, crtc) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 		enum plane plane = intel_crtc->plane;
@@ -2791,6 +2776,12 @@ void intel_display_handle_reset(struct drm_device *dev)
 		intel_prepare_page_flip(dev, plane);
 		intel_finish_page_flip_plane(dev, plane);
 	}
+}
+
+static void intel_update_primary_planes(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_crtc *crtc;
 
 	for_each_crtc(dev, crtc) {
 		struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -2810,6 +2801,79 @@ void intel_display_handle_reset(struct drm_device *dev)
 	}
 }
 
+void intel_prepare_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+	struct intel_crtc *crtc;
+
+	/* no reset support for gen2 */
+	if (IS_GEN2(dev))
+		return;
+
+	/* reset doesn't touch the display */
+	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev))
+		return;
+
+	drm_modeset_lock_all(dev);
+
+	/*
+	 * Disabling the crtcs gracefully seems nicer. Also the
+	 * g33 docs say we should at least disable all the planes.
+	 */
+	for_each_intel_crtc(dev, crtc) {
+		if (crtc->active)
+			dev_priv->display.crtc_disable(&crtc->base);
+	}
+}
+
+void intel_finish_reset(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = to_i915(dev);
+
+	/*
+	 * Flips in the rings will be nuked by the reset,
+	 * so complete all pending flips so that user space
+	 * will get its events and not get stuck.
+	 */
+	intel_complete_page_flips(dev);
+
+	/* no reset support for gen2 */
+	if (IS_GEN2(dev))
+		return;
+
+	/* reset doesn't touch the display */
+	if (INTEL_INFO(dev)->gen >= 5 || IS_G4X(dev)) {
+		/*
+		 * Flips in the rings have been nuked by the reset,
+		 * so update the base address of all primary
+		 * planes to the the last fb to make sure we're
+		 * showing the correct fb after a reset.
+		 */
+		intel_update_primary_planes(dev);
+		return;
+	}
+
+	/*
+	 * The display has been reset as well,
+	 * so need a full re-initialization.
+	 */
+	intel_runtime_pm_disable_interrupts(dev_priv);
+	intel_runtime_pm_enable_interrupts(dev_priv);
+
+	intel_modeset_init_hw(dev);
+
+	spin_lock_irq(&dev_priv->irq_lock);
+	if (dev_priv->display.hpd_irq_setup)
+		dev_priv->display.hpd_irq_setup(dev);
+	spin_unlock_irq(&dev_priv->irq_lock);
+
+	intel_modeset_setup_hw_state(dev, true);
+
+	intel_hpd_init(dev_priv);
+
+	drm_modeset_unlock_all(dev);
+}
+
 static int
 intel_finish_fb(struct drm_framebuffer *old_fb)
 {
@@ -10089,6 +10153,48 @@ static bool check_encoder_cloning(struct intel_crtc *crtc)
 	return true;
 }
 
+static bool check_digital_port_conflicts(struct drm_device *dev)
+{
+	struct intel_connector *connector;
+	unsigned int used_ports = 0;
+
+	/*
+	 * Walk the connector list instead of the encoder
+	 * list to detect the problem on ddi platforms
+	 * where there's just one encoder per digital port.
+	 */
+	list_for_each_entry(connector,
+			    &dev->mode_config.connector_list, base.head) {
+		struct intel_encoder *encoder = connector->new_encoder;
+
+		if (!encoder)
+			continue;
+
+		WARN_ON(!encoder->new_crtc);
+
+		switch (encoder->type) {
+			unsigned int port_mask;
+		case INTEL_OUTPUT_UNKNOWN:
+			if (WARN_ON(!HAS_DDI(dev)))
+				break;
+		case INTEL_OUTPUT_DISPLAYPORT:
+		case INTEL_OUTPUT_HDMI:
+		case INTEL_OUTPUT_EDP:
+			port_mask = 1 << enc_to_dig_port(&encoder->base)->port;
+
+			/* the same port mustn't appear more than once */
+			if (used_ports & port_mask)
+				return false;
+
+			used_ports |= port_mask;
+		default:
+			break;
+		}
+	}
+
+	return true;
+}
+
 static struct intel_crtc_config *
 intel_modeset_pipe_config(struct drm_crtc *crtc,
 			  struct drm_framebuffer *fb,
@@ -10105,6 +10211,11 @@ intel_modeset_pipe_config(struct drm_crtc *crtc,
 		return ERR_PTR(-EINVAL);
 	}
 
+	if (!check_digital_port_conflicts(dev)) {
+		DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+		return ERR_PTR(-EINVAL);
+	}
+
 	pipe_config = kzalloc(sizeof(*pipe_config), GFP_KERNEL);
 	if (!pipe_config)
 		return ERR_PTR(-ENOMEM);
@@ -10907,7 +11018,6 @@ intel_modeset_compute_config(struct drm_crtc *crtc,
 	}
 	intel_dump_pipe_config(to_intel_crtc(crtc), pipe_config,
 			       "[modeset]");
-	to_intel_crtc(crtc)->new_config = pipe_config;
 
 out:
 	return pipe_config;
@@ -10933,6 +11043,9 @@ static int __intel_set_mode(struct drm_crtc *crtc,
 
 	*saved_mode = crtc->mode;
 
+	if (modeset_pipes)
+		to_intel_crtc(crtc)->new_config = pipe_config;
+
 	/*
 	 * See if the config requires any additional preparation, e.g.
 	 * to adjust global state with pipes off. We need to do this
@@ -11466,12 +11579,12 @@ static int intel_crtc_set_config(struct drm_mode_set *set)
 		ret = PTR_ERR(pipe_config);
 		goto fail;
 	} else if (pipe_config) {
-		if (to_intel_crtc(set->crtc)->new_config->has_audio !=
+		if (pipe_config->has_audio !=
 		    to_intel_crtc(set->crtc)->config.has_audio)
 			config->mode_changed = true;
 
 		/* Force mode sets for any infoframe stuff */
-		if (to_intel_crtc(set->crtc)->new_config->has_infoframe ||
+		if (pipe_config->has_infoframe ||
 		    to_intel_crtc(set->crtc)->config.has_infoframe)
 			config->mode_changed = true;
 	}
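
The check_digital_port_conflicts() helper added above boils down to a single-pass bitmask check over the requested digital ports. A standalone sketch of that idea, with hypothetical port names and no driver types:

/* Sketch of the used_ports bookkeeping: each digital port may only be claimed once. */
#include <stdbool.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E };

static bool ports_conflict(const enum port *requested, int n)
{
	unsigned int used_ports = 0;

	for (int i = 0; i < n; i++) {
		unsigned int port_mask = 1u << requested[i];

		if (used_ports & port_mask)
			return true;	/* same port claimed twice */
		used_ports |= port_mask;
	}

	return false;
}

int main(void)
{
	enum port ok[] = { PORT_B, PORT_C };
	enum port bad[] = { PORT_B, PORT_B };	/* e.g. DP and HDMI fighting over one DDI */

	printf("ok conflicts: %d, bad conflicts: %d\n",
	       ports_conflict(ok, 2), ports_conflict(bad, 2));
	return 0;
}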
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d2529ec280c8..5cecc20efa71 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1503,6 +1503,7 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
 	if (!is_edp(intel_dp))
 		return false;
 
+	cancel_delayed_work(&intel_dp->panel_vdd_work);
 	intel_dp->want_panel_vdd = true;
 
 	if (edp_have_panel_vdd(intel_dp))
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index f0a46ecf3f3a..25fdbb16d4e0 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -958,7 +958,8 @@ unsigned long intel_gen4_compute_page_offset(int *x, int *y,
 					     unsigned int tiling_mode,
 					     unsigned int bpp,
 					     unsigned int pitch);
-void intel_display_handle_reset(struct drm_device *dev);
+void intel_prepare_reset(struct drm_device *dev);
+void intel_finish_reset(struct drm_device *dev);
 void hsw_enable_pc8(struct drm_i915_private *dev_priv);
 void hsw_disable_pc8(struct drm_i915_private *dev_priv);
 void intel_dp_get_m_n(struct intel_crtc *crtc,
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ec873338e84d..3abc2000fce9 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1461,10 +1461,13 @@ static void chv_hdmi_post_disable(struct intel_encoder *encoder)
 static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 {
 	struct intel_digital_port *dport = enc_to_dig_port(&encoder->base);
+	struct intel_hdmi *intel_hdmi = &dport->hdmi;
 	struct drm_device *dev = encoder->base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc =
 		to_intel_crtc(encoder->base.crtc);
+	struct drm_display_mode *adjusted_mode =
+		&intel_crtc->config.adjusted_mode;
 	enum dpio_channel ch = vlv_dport_to_channel(dport);
 	int pipe = intel_crtc->pipe;
 	int data, i;
@@ -1589,6 +1592,10 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder)
 
 	mutex_unlock(&dev_priv->dpio_lock);
 
+	intel_hdmi->set_infoframes(&encoder->base,
+				   intel_crtc->config.has_hdmi_sink,
+				   adjusted_mode);
+
 	intel_enable_hdmi(encoder);
 
 	vlv_wait_port_ready(dev_priv, dport);
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 1a3e485a4f97..46de8d75b4bf 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -43,8 +43,8 @@
 static void
 assert_device_not_suspended(struct drm_i915_private *dev_priv)
 {
-	WARN(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
-	     "Device suspended\n");
+	WARN_ONCE(HAS_RUNTIME_PM(dev_priv->dev) && dev_priv->pm.suspended,
+		  "Device suspended\n");
 }
 
 static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
@@ -671,18 +671,22 @@ void assert_force_wake_inactive(struct drm_i915_private *dev_priv)
 	 REG_RANGE((reg), 0x22000, 0x24000))
 
 #define FORCEWAKE_GEN9_UNCORE_RANGE_OFFSET(reg) \
-	REG_RANGE((reg), 0xC00, 0x2000)
+	REG_RANGE((reg), 0xB00, 0x2000)
 
 #define FORCEWAKE_GEN9_RENDER_RANGE_OFFSET(reg) \
-	(REG_RANGE((reg), 0x2000, 0x4000) || \
+	(REG_RANGE((reg), 0x2000, 0x2700) || \
+	 REG_RANGE((reg), 0x3000, 0x4000) || \
 	 REG_RANGE((reg), 0x5200, 0x8000) || \
+	 REG_RANGE((reg), 0x8140, 0x8160) || \
 	 REG_RANGE((reg), 0x8300, 0x8500) || \
 	 REG_RANGE((reg), 0x8C00, 0x8D00) || \
 	 REG_RANGE((reg), 0xB000, 0xB480) || \
-	 REG_RANGE((reg), 0xE000, 0xE800))
+	 REG_RANGE((reg), 0xE000, 0xE900) || \
+	 REG_RANGE((reg), 0x24400, 0x24800))
 
 #define FORCEWAKE_GEN9_MEDIA_RANGE_OFFSET(reg) \
-	(REG_RANGE((reg), 0x8800, 0x8A00) || \
+	(REG_RANGE((reg), 0x8130, 0x8140) || \
+	 REG_RANGE((reg), 0x8800, 0x8A00) || \
 	 REG_RANGE((reg), 0xD000, 0xD800) || \
 	 REG_RANGE((reg), 0x12000, 0x14000) || \
 	 REG_RANGE((reg), 0x1A000, 0x1EA00) || \
@@ -1345,41 +1349,34 @@ int i915_get_reset_stats_ioctl(struct drm_device *dev,
 	return 0;
 }
 
-static int i965_reset_complete(struct drm_device *dev)
+static int i915_reset_complete(struct drm_device *dev)
 {
 	u8 gdrst;
-	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
-	return (gdrst & GRDOM_RESET_ENABLE) == 0;
+	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+	return (gdrst & GRDOM_RESET_STATUS) == 0;
}
 
-static int i965_do_reset(struct drm_device *dev)
+static int i915_do_reset(struct drm_device *dev)
 {
-	int ret;
-
-	/* FIXME: i965g/gm need a display save/restore for gpu reset. */
-	return -ENODEV;
-
-	/*
-	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
-	 * well as the reset bit (GR/bit 0). Setting the GR bit
-	 * triggers the reset; when done, the hardware will clear it.
-	 */
-	pci_write_config_byte(dev->pdev, I965_GDRST,
-			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
-	ret = wait_for(i965_reset_complete(dev), 500);
-	if (ret)
-		return ret;
-
-	pci_write_config_byte(dev->pdev, I965_GDRST,
-			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-
-	ret = wait_for(i965_reset_complete(dev), 500);
-	if (ret)
-		return ret;
+	/* assert reset for at least 20 usec */
+	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+	udelay(20);
+	pci_write_config_byte(dev->pdev, I915_GDRST, 0);
+
+	return wait_for(i915_reset_complete(dev), 500);
+}
 
-	pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+static int g4x_reset_complete(struct drm_device *dev)
+{
+	u8 gdrst;
+	pci_read_config_byte(dev->pdev, I915_GDRST, &gdrst);
+	return (gdrst & GRDOM_RESET_ENABLE) == 0;
+}
 
-	return 0;
+static int g33_do_reset(struct drm_device *dev)
+{
+	pci_write_config_byte(dev->pdev, I915_GDRST, GRDOM_RESET_ENABLE);
+	return wait_for(g4x_reset_complete(dev), 500);
 }
 
 static int g4x_do_reset(struct drm_device *dev)
@@ -1387,9 +1384,9 @@ static int g4x_do_reset(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret;
 
-	pci_write_config_byte(dev->pdev, I965_GDRST,
+	pci_write_config_byte(dev->pdev, I915_GDRST,
 			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
-	ret = wait_for(i965_reset_complete(dev), 500);
+	ret = wait_for(g4x_reset_complete(dev), 500);
 	if (ret)
 		return ret;
 
@@ -1397,9 +1394,9 @@ static int g4x_do_reset(struct drm_device *dev)
 	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) | VCP_UNIT_CLOCK_GATE_DISABLE);
 	POSTING_READ(VDECCLK_GATE_D);
 
-	pci_write_config_byte(dev->pdev, I965_GDRST,
+	pci_write_config_byte(dev->pdev, I915_GDRST,
 			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
-	ret = wait_for(i965_reset_complete(dev), 500);
+	ret = wait_for(g4x_reset_complete(dev), 500);
 	if (ret)
 		return ret;
 
@@ -1407,7 +1404,7 @@ static int g4x_do_reset(struct drm_device *dev)
 	I915_WRITE(VDECCLK_GATE_D, I915_READ(VDECCLK_GATE_D) & ~VCP_UNIT_CLOCK_GATE_DISABLE);
 	POSTING_READ(VDECCLK_GATE_D);
 
-	pci_write_config_byte(dev->pdev, I965_GDRST, 0);
+	pci_write_config_byte(dev->pdev, I915_GDRST, 0);
 
 	return 0;
 }
@@ -1465,8 +1462,10 @@ int intel_gpu_reset(struct drm_device *dev)
 		return ironlake_do_reset(dev);
 	else if (IS_G4X(dev))
 		return g4x_do_reset(dev);
-	else if (IS_GEN4(dev))
-		return i965_do_reset(dev);
+	else if (IS_G33(dev))
+		return g33_do_reset(dev);
+	else if (INTEL_INFO(dev)->gen >= 3)
+		return i915_do_reset(dev);
 	else
 		return -ENODEV;
 }
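
The gen9 forcewake range macros adjusted above classify a register offset with simple half-open range checks, assuming REG_RANGE() is the usual "(reg) >= start && (reg) < end" comparison. A standalone sketch of that classification (only the uncore range is shown; the offsets are copied from the patch, the helper itself is illustrative, not the driver's):

/* Sketch of range-based forcewake classification for a register offset. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define REG_RANGE(reg, start, end) ((reg) >= (start) && (reg) < (end))

static bool gen9_is_uncore_range(uint32_t reg)
{
	/* 0xB00-0x1FFF now needs no forcewake per the updated macro. */
	return REG_RANGE(reg, 0xB00, 0x2000);
}

int main(void)
{
	printf("0xB10 in uncore range: %d\n", gen9_is_uncore_range(0xB10));
	printf("0x2100 in uncore range: %d\n", gen9_is_uncore_range(0x2100));
	return 0;
}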