 drivers/gpu/drm/i915/Makefile        |   2
 drivers/gpu/drm/i915/i915_dma.c      |   5
 drivers/gpu/drm/i915/i915_drv.h      |  28
 drivers/gpu/drm/i915/i915_gem.c      |  75
 drivers/gpu/drm/i915/i915_irq.c      |  21
 drivers/gpu/drm/i915/i915_reg.h      |  11
 drivers/gpu/drm/i915/intel_acpi.c    | 286
 drivers/gpu/drm/i915/intel_bios.c    |  60
 drivers/gpu/drm/i915/intel_crt.c     |   3
 drivers/gpu/drm/i915/intel_display.c | 149
 drivers/gpu/drm/i915/intel_dp.c      | 379
 drivers/gpu/drm/i915/intel_drv.h     |   2
 drivers/gpu/drm/i915/intel_fb.c      |   2
 include/drm/drm_dp_helper.h          |   3
 14 files changed, 723 insertions, 303 deletions
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index f6e98dd416c9..fdc833d5cc7b 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -35,6 +35,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \ | |||
35 | 35 | ||
36 | i915-$(CONFIG_COMPAT) += i915_ioc32.o | 36 | i915-$(CONFIG_COMPAT) += i915_ioc32.o |
37 | 37 | ||
38 | i915-$(CONFIG_ACPI) += intel_acpi.o | ||
39 | |||
38 | obj-$(CONFIG_DRM_I915) += i915.o | 40 | obj-$(CONFIG_DRM_I915) += i915.o |
39 | 41 | ||
40 | CFLAGS_i915_trace_points.o := -I$(src) | 42 | CFLAGS_i915_trace_points.o := -I$(src) |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 251987307ebe..a99fae33bdf6 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1244,6 +1244,8 @@ static int i915_load_modeset_init(struct drm_device *dev, | |||
1244 | if (ret) | 1244 | if (ret) |
1245 | goto cleanup_ringbuffer; | 1245 | goto cleanup_ringbuffer; |
1246 | 1246 | ||
1247 | intel_register_dsm_handler(); | ||
1248 | |||
1247 | ret = vga_switcheroo_register_client(dev->pdev, | 1249 | ret = vga_switcheroo_register_client(dev->pdev, |
1248 | i915_switcheroo_set_state, | 1250 | i915_switcheroo_set_state, |
1249 | i915_switcheroo_can_switch); | 1251 | i915_switcheroo_can_switch); |
@@ -2153,6 +2155,9 @@ int i915_driver_unload(struct drm_device *dev) | |||
2153 | drm_mm_takedown(&dev_priv->mm.vram); | 2155 | drm_mm_takedown(&dev_priv->mm.vram); |
2154 | 2156 | ||
2155 | intel_cleanup_overlay(dev); | 2157 | intel_cleanup_overlay(dev); |
2158 | |||
2159 | if (!I915_NEED_GFX_HWS(dev)) | ||
2160 | i915_free_hws(dev); | ||
2156 | } | 2161 | } |
2157 | 2162 | ||
2158 | intel_teardown_gmbus(dev); | 2163 | intel_teardown_gmbus(dev); |
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 73ad8bff2c2a..84e33aeececd 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -339,17 +339,18 @@ typedef struct drm_i915_private { | |||
339 | unsigned int int_crt_support:1; | 339 | unsigned int int_crt_support:1; |
340 | unsigned int lvds_use_ssc:1; | 340 | unsigned int lvds_use_ssc:1; |
341 | int lvds_ssc_freq; | 341 | int lvds_ssc_freq; |
342 | |||
343 | struct { | 342 | struct { |
344 | u8 rate:4; | 343 | int rate; |
345 | u8 lanes:4; | 344 | int lanes; |
346 | u8 preemphasis:4; | 345 | int preemphasis; |
347 | u8 vswing:4; | 346 | int vswing; |
348 | 347 | ||
349 | u8 initialized:1; | 348 | bool initialized; |
350 | u8 support:1; | 349 | bool support; |
351 | u8 bpp:6; | 350 | int bpp; |
351 | struct edp_power_seq pps; | ||
352 | } edp; | 352 | } edp; |
353 | bool no_aux_handshake; | ||
353 | 354 | ||
354 | struct notifier_block lid_notifier; | 355 | struct notifier_block lid_notifier; |
355 | 356 | ||
@@ -1136,6 +1137,15 @@ static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; } | |||
1136 | static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } | 1137 | static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; } |
1137 | #endif | 1138 | #endif |
1138 | 1139 | ||
1140 | /* intel_acpi.c */ | ||
1141 | #ifdef CONFIG_ACPI | ||
1142 | extern void intel_register_dsm_handler(void); | ||
1143 | extern void intel_unregister_dsm_handler(void); | ||
1144 | #else | ||
1145 | static inline void intel_register_dsm_handler(void) { return; } | ||
1146 | static inline void intel_unregister_dsm_handler(void) { return; } | ||
1147 | #endif /* CONFIG_ACPI */ | ||
1148 | |||
1139 | /* modesetting */ | 1149 | /* modesetting */ |
1140 | extern void intel_modeset_init(struct drm_device *dev); | 1150 | extern void intel_modeset_init(struct drm_device *dev); |
1141 | extern void intel_modeset_cleanup(struct drm_device *dev); | 1151 | extern void intel_modeset_cleanup(struct drm_device *dev); |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 100a7537980e..72ab3032300a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3647,41 +3647,6 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer2 *exec, | |||
3647 | } | 3647 | } |
3648 | 3648 | ||
3649 | static int | 3649 | static int |
3650 | i915_gem_wait_for_pending_flip(struct drm_device *dev, | ||
3651 | struct drm_gem_object **object_list, | ||
3652 | int count) | ||
3653 | { | ||
3654 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
3655 | struct drm_i915_gem_object *obj_priv; | ||
3656 | DEFINE_WAIT(wait); | ||
3657 | int i, ret = 0; | ||
3658 | |||
3659 | for (;;) { | ||
3660 | prepare_to_wait(&dev_priv->pending_flip_queue, | ||
3661 | &wait, TASK_INTERRUPTIBLE); | ||
3662 | for (i = 0; i < count; i++) { | ||
3663 | obj_priv = to_intel_bo(object_list[i]); | ||
3664 | if (atomic_read(&obj_priv->pending_flip) > 0) | ||
3665 | break; | ||
3666 | } | ||
3667 | if (i == count) | ||
3668 | break; | ||
3669 | |||
3670 | if (!signal_pending(current)) { | ||
3671 | mutex_unlock(&dev->struct_mutex); | ||
3672 | schedule(); | ||
3673 | mutex_lock(&dev->struct_mutex); | ||
3674 | continue; | ||
3675 | } | ||
3676 | ret = -ERESTARTSYS; | ||
3677 | break; | ||
3678 | } | ||
3679 | finish_wait(&dev_priv->pending_flip_queue, &wait); | ||
3680 | |||
3681 | return ret; | ||
3682 | } | ||
3683 | |||
3684 | static int | ||
3685 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, | 3650 | i915_gem_do_execbuffer(struct drm_device *dev, void *data, |
3686 | struct drm_file *file_priv, | 3651 | struct drm_file *file_priv, |
3687 | struct drm_i915_gem_execbuffer2 *args, | 3652 | struct drm_i915_gem_execbuffer2 *args, |
@@ -3773,7 +3738,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3773 | } | 3738 | } |
3774 | 3739 | ||
3775 | /* Look up object handles */ | 3740 | /* Look up object handles */ |
3776 | flips = 0; | ||
3777 | for (i = 0; i < args->buffer_count; i++) { | 3741 | for (i = 0; i < args->buffer_count; i++) { |
3778 | object_list[i] = drm_gem_object_lookup(dev, file_priv, | 3742 | object_list[i] = drm_gem_object_lookup(dev, file_priv, |
3779 | exec_list[i].handle); | 3743 | exec_list[i].handle); |
@@ -3796,14 +3760,6 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3796 | goto err; | 3760 | goto err; |
3797 | } | 3761 | } |
3798 | obj_priv->in_execbuffer = true; | 3762 | obj_priv->in_execbuffer = true; |
3799 | flips += atomic_read(&obj_priv->pending_flip); | ||
3800 | } | ||
3801 | |||
3802 | if (flips > 0) { | ||
3803 | ret = i915_gem_wait_for_pending_flip(dev, object_list, | ||
3804 | args->buffer_count); | ||
3805 | if (ret) | ||
3806 | goto err; | ||
3807 | } | 3763 | } |
3808 | 3764 | ||
3809 | /* Pin and relocate */ | 3765 | /* Pin and relocate */ |
@@ -3943,9 +3899,38 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data, | |||
3943 | ~0); | 3899 | ~0); |
3944 | #endif | 3900 | #endif |
3945 | 3901 | ||
3902 | /* Check for any pending flips. As we only maintain a flip queue depth | ||
3903 | * of 1, we can simply insert a WAIT for the next display flip prior | ||
3904 | * to executing the batch and avoid stalling the CPU. | ||
3905 | */ | ||
3906 | flips = 0; | ||
3907 | for (i = 0; i < args->buffer_count; i++) { | ||
3908 | if (object_list[i]->write_domain) | ||
3909 | flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip); | ||
3910 | } | ||
3911 | if (flips) { | ||
3912 | int plane, flip_mask; | ||
3913 | |||
3914 | for (plane = 0; flips >> plane; plane++) { | ||
3915 | if (((flips >> plane) & 1) == 0) | ||
3916 | continue; | ||
3917 | |||
3918 | if (plane) | ||
3919 | flip_mask = MI_WAIT_FOR_PLANE_B_FLIP; | ||
3920 | else | ||
3921 | flip_mask = MI_WAIT_FOR_PLANE_A_FLIP; | ||
3922 | |||
3923 | intel_ring_begin(dev, ring, 2); | ||
3924 | intel_ring_emit(dev, ring, | ||
3925 | MI_WAIT_FOR_EVENT | flip_mask); | ||
3926 | intel_ring_emit(dev, ring, MI_NOOP); | ||
3927 | intel_ring_advance(dev, ring); | ||
3928 | } | ||
3929 | } | ||
3930 | |||
3946 | /* Exec the batchbuffer */ | 3931 | /* Exec the batchbuffer */ |
3947 | ret = ring->dispatch_gem_execbuffer(dev, ring, args, | 3932 | ret = ring->dispatch_gem_execbuffer(dev, ring, args, |
3948 | cliprects, exec_offset); | 3933 | cliprects, exec_offset); |
3949 | if (ret) { | 3934 | if (ret) { |
3950 | DRM_ERROR("dispatch failed %d\n", ret); | 3935 | DRM_ERROR("dispatch failed %d\n", ret); |
3951 | goto err; | 3936 | goto err; |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 64c07c24e300..0d051e7f6702 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -298,6 +298,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
298 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 298 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
299 | int ret = IRQ_NONE; | 299 | int ret = IRQ_NONE; |
300 | u32 de_iir, gt_iir, de_ier, pch_iir; | 300 | u32 de_iir, gt_iir, de_ier, pch_iir; |
301 | u32 hotplug_mask; | ||
301 | struct drm_i915_master_private *master_priv; | 302 | struct drm_i915_master_private *master_priv; |
302 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; | 303 | struct intel_ring_buffer *render_ring = &dev_priv->render_ring; |
303 | u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; | 304 | u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT; |
@@ -317,6 +318,11 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
317 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) | 318 | if (de_iir == 0 && gt_iir == 0 && pch_iir == 0) |
318 | goto done; | 319 | goto done; |
319 | 320 | ||
321 | if (HAS_PCH_CPT(dev)) | ||
322 | hotplug_mask = SDE_HOTPLUG_MASK_CPT; | ||
323 | else | ||
324 | hotplug_mask = SDE_HOTPLUG_MASK; | ||
325 | |||
320 | ret = IRQ_HANDLED; | 326 | ret = IRQ_HANDLED; |
321 | 327 | ||
322 | if (dev->primary->master) { | 328 | if (dev->primary->master) { |
@@ -358,10 +364,8 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev) | |||
358 | drm_handle_vblank(dev, 1); | 364 | drm_handle_vblank(dev, 1); |
359 | 365 | ||
360 | /* check event from PCH */ | 366 | /* check event from PCH */ |
361 | if ((de_iir & DE_PCH_EVENT) && | 367 | if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask)) |
362 | (pch_iir & SDE_HOTPLUG_MASK)) { | ||
363 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); | 368 | queue_work(dev_priv->wq, &dev_priv->hotplug_work); |
364 | } | ||
365 | 369 | ||
366 | if (de_iir & DE_PCU_EVENT) { | 370 | if (de_iir & DE_PCU_EVENT) { |
367 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); | 371 | I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS)); |
@@ -1431,8 +1435,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1431 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | | 1435 | u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT | |
1432 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; | 1436 | DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE; |
1433 | u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; | 1437 | u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT; |
1434 | u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | 1438 | u32 hotplug_mask; |
1435 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | ||
1436 | 1439 | ||
1437 | dev_priv->irq_mask_reg = ~display_mask; | 1440 | dev_priv->irq_mask_reg = ~display_mask; |
1438 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; | 1441 | dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK; |
@@ -1459,6 +1462,14 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1459 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); | 1462 | I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg); |
1460 | (void) I915_READ(GTIER); | 1463 | (void) I915_READ(GTIER); |
1461 | 1464 | ||
1465 | if (HAS_PCH_CPT(dev)) { | ||
1466 | hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT | | ||
1467 | SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ; | ||
1468 | } else { | ||
1469 | hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG | | ||
1470 | SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG; | ||
1471 | } | ||
1472 | |||
1462 | dev_priv->pch_irq_mask_reg = ~hotplug_mask; | 1473 | dev_priv->pch_irq_mask_reg = ~hotplug_mask; |
1463 | dev_priv->pch_irq_enable_reg = hotplug_mask; | 1474 | dev_priv->pch_irq_enable_reg = hotplug_mask; |
1464 | 1475 | ||
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index d02de212e6ad..47032186a31a 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1373,6 +1373,9 @@ | |||
1373 | #define PP_SEQUENCE_ON (1 << 28) | 1373 | #define PP_SEQUENCE_ON (1 << 28) |
1374 | #define PP_SEQUENCE_OFF (2 << 28) | 1374 | #define PP_SEQUENCE_OFF (2 << 28) |
1375 | #define PP_SEQUENCE_MASK 0x30000000 | 1375 | #define PP_SEQUENCE_MASK 0x30000000 |
1376 | #define PP_CYCLE_DELAY_ACTIVE (1 << 27) | ||
1377 | #define PP_SEQUENCE_STATE_ON_IDLE (1 << 3) | ||
1378 | #define PP_SEQUENCE_STATE_MASK 0x0000000f | ||
1376 | #define PP_CONTROL 0x61204 | 1379 | #define PP_CONTROL 0x61204 |
1377 | #define POWER_TARGET_ON (1 << 0) | 1380 | #define POWER_TARGET_ON (1 << 0) |
1378 | #define PP_ON_DELAYS 0x61208 | 1381 | #define PP_ON_DELAYS 0x61208 |
@@ -2598,6 +2601,10 @@ | |||
2598 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) | 2601 | #define SDE_PORTD_HOTPLUG_CPT (1 << 23) |
2599 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) | 2602 | #define SDE_PORTC_HOTPLUG_CPT (1 << 22) |
2600 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) | 2603 | #define SDE_PORTB_HOTPLUG_CPT (1 << 21) |
2604 | #define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ | ||
2605 | SDE_PORTD_HOTPLUG_CPT | \ | ||
2606 | SDE_PORTC_HOTPLUG_CPT | \ | ||
2607 | SDE_PORTB_HOTPLUG_CPT) | ||
2601 | 2608 | ||
2602 | #define SDEISR 0xc4000 | 2609 | #define SDEISR 0xc4000 |
2603 | #define SDEIMR 0xc4004 | 2610 | #define SDEIMR 0xc4004 |
@@ -2779,6 +2786,10 @@ | |||
2779 | #define FDI_RXA_CHICKEN 0xc200c | 2786 | #define FDI_RXA_CHICKEN 0xc200c |
2780 | #define FDI_RXB_CHICKEN 0xc2010 | 2787 | #define FDI_RXB_CHICKEN 0xc2010 |
2781 | #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) | 2788 | #define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1) |
2789 | #define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN) | ||
2790 | |||
2791 | #define SOUTH_DSPCLK_GATE_D 0xc2020 | ||
2792 | #define PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29) | ||
2782 | 2793 | ||
2783 | /* CPU: FDI_TX */ | 2794 | /* CPU: FDI_TX */ |
2784 | #define FDI_TXA_CTL 0x60100 | 2795 | #define FDI_TXA_CTL 0x60100 |
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
new file mode 100644
index 000000000000..65c88f9ba12c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -0,0 +1,286 @@ | |||
1 | /* | ||
2 | * Intel ACPI functions | ||
3 | * | ||
4 | * _DSM related code stolen from nouveau_acpi.c. | ||
5 | */ | ||
6 | #include <linux/pci.h> | ||
7 | #include <linux/acpi.h> | ||
8 | #include <linux/vga_switcheroo.h> | ||
9 | #include <acpi/acpi_drivers.h> | ||
10 | |||
11 | #include "drmP.h" | ||
12 | |||
13 | #define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */ | ||
14 | |||
15 | #define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */ | ||
16 | #define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */ | ||
17 | |||
18 | static struct intel_dsm_priv { | ||
19 | acpi_handle dhandle; | ||
20 | } intel_dsm_priv; | ||
21 | |||
22 | static const u8 intel_dsm_guid[] = { | ||
23 | 0xd3, 0x73, 0xd8, 0x7e, | ||
24 | 0xd0, 0xc2, | ||
25 | 0x4f, 0x4e, | ||
26 | 0xa8, 0x54, | ||
27 | 0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c | ||
28 | }; | ||
29 | |||
30 | static int intel_dsm(acpi_handle handle, int func, int arg) | ||
31 | { | ||
32 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
33 | struct acpi_object_list input; | ||
34 | union acpi_object params[4]; | ||
35 | union acpi_object *obj; | ||
36 | u32 result; | ||
37 | int ret = 0; | ||
38 | |||
39 | input.count = 4; | ||
40 | input.pointer = params; | ||
41 | params[0].type = ACPI_TYPE_BUFFER; | ||
42 | params[0].buffer.length = sizeof(intel_dsm_guid); | ||
43 | params[0].buffer.pointer = (char *)intel_dsm_guid; | ||
44 | params[1].type = ACPI_TYPE_INTEGER; | ||
45 | params[1].integer.value = INTEL_DSM_REVISION_ID; | ||
46 | params[2].type = ACPI_TYPE_INTEGER; | ||
47 | params[2].integer.value = func; | ||
48 | params[3].type = ACPI_TYPE_INTEGER; | ||
49 | params[3].integer.value = arg; | ||
50 | |||
51 | ret = acpi_evaluate_object(handle, "_DSM", &input, &output); | ||
52 | if (ret) { | ||
53 | DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); | ||
54 | return ret; | ||
55 | } | ||
56 | |||
57 | obj = (union acpi_object *)output.pointer; | ||
58 | |||
59 | result = 0; | ||
60 | switch (obj->type) { | ||
61 | case ACPI_TYPE_INTEGER: | ||
62 | result = obj->integer.value; | ||
63 | break; | ||
64 | |||
65 | case ACPI_TYPE_BUFFER: | ||
66 | if (obj->buffer.length == 4) { | ||
67 | result =(obj->buffer.pointer[0] | | ||
68 | (obj->buffer.pointer[1] << 8) | | ||
69 | (obj->buffer.pointer[2] << 16) | | ||
70 | (obj->buffer.pointer[3] << 24)); | ||
71 | break; | ||
72 | } | ||
73 | default: | ||
74 | ret = -EINVAL; | ||
75 | break; | ||
76 | } | ||
77 | if (result == 0x80000002) | ||
78 | ret = -ENODEV; | ||
79 | |||
80 | kfree(output.pointer); | ||
81 | return ret; | ||
82 | } | ||
83 | |||
84 | static char *intel_dsm_port_name(u8 id) | ||
85 | { | ||
86 | switch (id) { | ||
87 | case 0: | ||
88 | return "Reserved"; | ||
89 | case 1: | ||
90 | return "Analog VGA"; | ||
91 | case 2: | ||
92 | return "LVDS"; | ||
93 | case 3: | ||
94 | return "Reserved"; | ||
95 | case 4: | ||
96 | return "HDMI/DVI_B"; | ||
97 | case 5: | ||
98 | return "HDMI/DVI_C"; | ||
99 | case 6: | ||
100 | return "HDMI/DVI_D"; | ||
101 | case 7: | ||
102 | return "DisplayPort_A"; | ||
103 | case 8: | ||
104 | return "DisplayPort_B"; | ||
105 | case 9: | ||
106 | return "DisplayPort_C"; | ||
107 | case 0xa: | ||
108 | return "DisplayPort_D"; | ||
109 | case 0xb: | ||
110 | case 0xc: | ||
111 | case 0xd: | ||
112 | return "Reserved"; | ||
113 | case 0xe: | ||
114 | return "WiDi"; | ||
115 | default: | ||
116 | return "bad type"; | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static char *intel_dsm_mux_type(u8 type) | ||
121 | { | ||
122 | switch (type) { | ||
123 | case 0: | ||
124 | return "unknown"; | ||
125 | case 1: | ||
126 | return "No MUX, iGPU only"; | ||
127 | case 2: | ||
128 | return "No MUX, dGPU only"; | ||
129 | case 3: | ||
130 | return "MUXed between iGPU and dGPU"; | ||
131 | default: | ||
132 | return "bad type"; | ||
133 | } | ||
134 | } | ||
135 | |||
136 | static void intel_dsm_platform_mux_info(void) | ||
137 | { | ||
138 | struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; | ||
139 | struct acpi_object_list input; | ||
140 | union acpi_object params[4]; | ||
141 | union acpi_object *pkg; | ||
142 | int i, ret; | ||
143 | |||
144 | input.count = 4; | ||
145 | input.pointer = params; | ||
146 | params[0].type = ACPI_TYPE_BUFFER; | ||
147 | params[0].buffer.length = sizeof(intel_dsm_guid); | ||
148 | params[0].buffer.pointer = (char *)intel_dsm_guid; | ||
149 | params[1].type = ACPI_TYPE_INTEGER; | ||
150 | params[1].integer.value = INTEL_DSM_REVISION_ID; | ||
151 | params[2].type = ACPI_TYPE_INTEGER; | ||
152 | params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO; | ||
153 | params[3].type = ACPI_TYPE_INTEGER; | ||
154 | params[3].integer.value = 0; | ||
155 | |||
156 | ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input, | ||
157 | &output); | ||
158 | if (ret) { | ||
159 | DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret); | ||
160 | goto out; | ||
161 | } | ||
162 | |||
163 | pkg = (union acpi_object *)output.pointer; | ||
164 | |||
165 | if (pkg->type == ACPI_TYPE_PACKAGE) { | ||
166 | union acpi_object *connector_count = &pkg->package.elements[0]; | ||
167 | DRM_DEBUG_DRIVER("MUX info connectors: %lld\n", | ||
168 | (unsigned long long)connector_count->integer.value); | ||
169 | for (i = 1; i < pkg->package.count; i++) { | ||
170 | union acpi_object *obj = &pkg->package.elements[i]; | ||
171 | union acpi_object *connector_id = | ||
172 | &obj->package.elements[0]; | ||
173 | union acpi_object *info = &obj->package.elements[1]; | ||
174 | DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n", | ||
175 | (unsigned long long)connector_id->integer.value); | ||
176 | DRM_DEBUG_DRIVER(" port id: %s\n", | ||
177 | intel_dsm_port_name(info->buffer.pointer[0])); | ||
178 | DRM_DEBUG_DRIVER(" display mux info: %s\n", | ||
179 | intel_dsm_mux_type(info->buffer.pointer[1])); | ||
180 | DRM_DEBUG_DRIVER(" aux/dc mux info: %s\n", | ||
181 | intel_dsm_mux_type(info->buffer.pointer[2])); | ||
182 | DRM_DEBUG_DRIVER(" hpd mux info: %s\n", | ||
183 | intel_dsm_mux_type(info->buffer.pointer[3])); | ||
184 | } | ||
185 | } else { | ||
186 | DRM_ERROR("MUX INFO call failed\n"); | ||
187 | } | ||
188 | |||
189 | out: | ||
190 | kfree(output.pointer); | ||
191 | } | ||
192 | |||
193 | static int intel_dsm_switchto(enum vga_switcheroo_client_id id) | ||
194 | { | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static int intel_dsm_power_state(enum vga_switcheroo_client_id id, | ||
199 | enum vga_switcheroo_state state) | ||
200 | { | ||
201 | return 0; | ||
202 | } | ||
203 | |||
204 | static int intel_dsm_init(void) | ||
205 | { | ||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int intel_dsm_get_client_id(struct pci_dev *pdev) | ||
210 | { | ||
211 | if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev)) | ||
212 | return VGA_SWITCHEROO_IGD; | ||
213 | else | ||
214 | return VGA_SWITCHEROO_DIS; | ||
215 | } | ||
216 | |||
217 | static struct vga_switcheroo_handler intel_dsm_handler = { | ||
218 | .switchto = intel_dsm_switchto, | ||
219 | .power_state = intel_dsm_power_state, | ||
220 | .init = intel_dsm_init, | ||
221 | .get_client_id = intel_dsm_get_client_id, | ||
222 | }; | ||
223 | |||
224 | static bool intel_dsm_pci_probe(struct pci_dev *pdev) | ||
225 | { | ||
226 | acpi_handle dhandle, intel_handle; | ||
227 | acpi_status status; | ||
228 | int ret; | ||
229 | |||
230 | dhandle = DEVICE_ACPI_HANDLE(&pdev->dev); | ||
231 | if (!dhandle) | ||
232 | return false; | ||
233 | |||
234 | status = acpi_get_handle(dhandle, "_DSM", &intel_handle); | ||
235 | if (ACPI_FAILURE(status)) { | ||
236 | DRM_DEBUG_KMS("no _DSM method for intel device\n"); | ||
237 | return false; | ||
238 | } | ||
239 | |||
240 | ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0); | ||
241 | if (ret < 0) { | ||
242 | DRM_ERROR("failed to get supported _DSM functions\n"); | ||
243 | return false; | ||
244 | } | ||
245 | |||
246 | intel_dsm_priv.dhandle = dhandle; | ||
247 | |||
248 | intel_dsm_platform_mux_info(); | ||
249 | return true; | ||
250 | } | ||
251 | |||
252 | static bool intel_dsm_detect(void) | ||
253 | { | ||
254 | char acpi_method_name[255] = { 0 }; | ||
255 | struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name}; | ||
256 | struct pci_dev *pdev = NULL; | ||
257 | bool has_dsm = false; | ||
258 | int vga_count = 0; | ||
259 | |||
260 | while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) { | ||
261 | vga_count++; | ||
262 | has_dsm |= intel_dsm_pci_probe(pdev); | ||
263 | } | ||
264 | |||
265 | if (vga_count == 2 && has_dsm) { | ||
266 | acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer); | ||
267 | DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n", | ||
268 | acpi_method_name); | ||
269 | return true; | ||
270 | } | ||
271 | |||
272 | return false; | ||
273 | } | ||
274 | |||
275 | void intel_register_dsm_handler(void) | ||
276 | { | ||
277 | if (!intel_dsm_detect()) | ||
278 | return; | ||
279 | |||
280 | vga_switcheroo_register_handler(&intel_dsm_handler); | ||
281 | } | ||
282 | |||
283 | void intel_unregister_dsm_handler(void) | ||
284 | { | ||
285 | vga_switcheroo_unregister_handler(); | ||
286 | } | ||
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b1f73ac0f3fd..cc15447eff41 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -24,6 +24,7 @@ | |||
24 | * Eric Anholt <eric@anholt.net> | 24 | * Eric Anholt <eric@anholt.net> |
25 | * | 25 | * |
26 | */ | 26 | */ |
27 | #include <drm/drm_dp_helper.h> | ||
27 | #include "drmP.h" | 28 | #include "drmP.h" |
28 | #include "drm.h" | 29 | #include "drm.h" |
29 | #include "i915_drm.h" | 30 | #include "i915_drm.h" |
@@ -413,6 +414,8 @@ static void | |||
413 | parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | 414 | parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) |
414 | { | 415 | { |
415 | struct bdb_edp *edp; | 416 | struct bdb_edp *edp; |
417 | struct edp_power_seq *edp_pps; | ||
418 | struct edp_link_params *edp_link_params; | ||
416 | 419 | ||
417 | edp = find_section(bdb, BDB_EDP); | 420 | edp = find_section(bdb, BDB_EDP); |
418 | if (!edp) { | 421 | if (!edp) { |
@@ -437,19 +440,54 @@ parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb) | |||
437 | break; | 440 | break; |
438 | } | 441 | } |
439 | 442 | ||
440 | dev_priv->edp.rate = edp->link_params[panel_type].rate; | 443 | /* Get the eDP sequencing and link info */ |
441 | dev_priv->edp.lanes = edp->link_params[panel_type].lanes; | 444 | edp_pps = &edp->power_seqs[panel_type]; |
442 | dev_priv->edp.preemphasis = edp->link_params[panel_type].preemphasis; | 445 | edp_link_params = &edp->link_params[panel_type]; |
443 | dev_priv->edp.vswing = edp->link_params[panel_type].vswing; | ||
444 | 446 | ||
445 | DRM_DEBUG_KMS("eDP vBIOS settings: bpp=%d, rate=%d, lanes=%d, preemphasis=%d, vswing=%d\n", | 447 | dev_priv->edp.pps = *edp_pps; |
446 | dev_priv->edp.bpp, | ||
447 | dev_priv->edp.rate, | ||
448 | dev_priv->edp.lanes, | ||
449 | dev_priv->edp.preemphasis, | ||
450 | dev_priv->edp.vswing); | ||
451 | 448 | ||
452 | dev_priv->edp.initialized = true; | 449 | dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 : |
450 | DP_LINK_BW_1_62; | ||
451 | switch (edp_link_params->lanes) { | ||
452 | case 0: | ||
453 | dev_priv->edp.lanes = 1; | ||
454 | break; | ||
455 | case 1: | ||
456 | dev_priv->edp.lanes = 2; | ||
457 | break; | ||
458 | case 3: | ||
459 | default: | ||
460 | dev_priv->edp.lanes = 4; | ||
461 | break; | ||
462 | } | ||
463 | switch (edp_link_params->preemphasis) { | ||
464 | case 0: | ||
465 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0; | ||
466 | break; | ||
467 | case 1: | ||
468 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5; | ||
469 | break; | ||
470 | case 2: | ||
471 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6; | ||
472 | break; | ||
473 | case 3: | ||
474 | dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5; | ||
475 | break; | ||
476 | } | ||
477 | switch (edp_link_params->vswing) { | ||
478 | case 0: | ||
479 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400; | ||
480 | break; | ||
481 | case 1: | ||
482 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600; | ||
483 | break; | ||
484 | case 2: | ||
485 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800; | ||
486 | break; | ||
487 | case 3: | ||
488 | dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200; | ||
489 | break; | ||
490 | } | ||
453 | } | 491 | } |
454 | 492 | ||
455 | static void | 493 | static void |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 389fcd2aea1f..c55c77043357 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -191,7 +191,8 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector) | |||
191 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); | 191 | DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER"); |
192 | 192 | ||
193 | if (turn_off_dac) { | 193 | if (turn_off_dac) { |
194 | I915_WRITE(PCH_ADPA, temp); | 194 | /* Make sure hotplug is enabled */ |
195 | I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE); | ||
195 | (void)I915_READ(PCH_ADPA); | 196 | (void)I915_READ(PCH_ADPA); |
196 | } | 197 | } |
197 | 198 | ||
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 96d08a9f3aaa..faacbbdbb270 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -932,10 +932,6 @@ intel_find_pll_ironlake_dp(const intel_limit_t *limit, struct drm_crtc *crtc, | |||
932 | struct drm_device *dev = crtc->dev; | 932 | struct drm_device *dev = crtc->dev; |
933 | intel_clock_t clock; | 933 | intel_clock_t clock; |
934 | 934 | ||
935 | /* return directly when it is eDP */ | ||
936 | if (HAS_eDP) | ||
937 | return true; | ||
938 | |||
939 | if (target < 200000) { | 935 | if (target < 200000) { |
940 | clock.n = 1; | 936 | clock.n = 1; |
941 | clock.p1 = 2; | 937 | clock.p1 = 2; |
@@ -1719,6 +1715,9 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1719 | POSTING_READ(reg); | 1715 | POSTING_READ(reg); |
1720 | udelay(150); | 1716 | udelay(150); |
1721 | 1717 | ||
1718 | /* Ironlake workaround, enable clock pointer after FDI enable*/ | ||
1719 | I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_ENABLE); | ||
1720 | |||
1722 | reg = FDI_RX_IIR(pipe); | 1721 | reg = FDI_RX_IIR(pipe); |
1723 | for (tries = 0; tries < 5; tries++) { | 1722 | for (tries = 0; tries < 5; tries++) { |
1724 | temp = I915_READ(reg); | 1723 | temp = I915_READ(reg); |
@@ -1764,6 +1763,28 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc) | |||
1764 | DRM_ERROR("FDI train 2 fail!\n"); | 1763 | DRM_ERROR("FDI train 2 fail!\n"); |
1765 | 1764 | ||
1766 | DRM_DEBUG_KMS("FDI train done\n"); | 1765 | DRM_DEBUG_KMS("FDI train done\n"); |
1766 | |||
1767 | /* enable normal train */ | ||
1768 | reg = FDI_TX_CTL(pipe); | ||
1769 | temp = I915_READ(reg); | ||
1770 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1771 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
1772 | I915_WRITE(reg, temp); | ||
1773 | |||
1774 | reg = FDI_RX_CTL(pipe); | ||
1775 | temp = I915_READ(reg); | ||
1776 | if (HAS_PCH_CPT(dev)) { | ||
1777 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
1778 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
1779 | } else { | ||
1780 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
1781 | temp |= FDI_LINK_TRAIN_NONE; | ||
1782 | } | ||
1783 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
1784 | |||
1785 | /* wait one idle pattern time */ | ||
1786 | POSTING_READ(reg); | ||
1787 | udelay(1000); | ||
1767 | } | 1788 | } |
1768 | 1789 | ||
1769 | static const int const snb_b_fdi_train_param [] = { | 1790 | static const int const snb_b_fdi_train_param [] = { |
@@ -2002,8 +2023,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2002 | 2023 | ||
2003 | /* Enable panel fitting for LVDS */ | 2024 | /* Enable panel fitting for LVDS */ |
2004 | if (dev_priv->pch_pf_size && | 2025 | if (dev_priv->pch_pf_size && |
2005 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) | 2026 | (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS) || HAS_eDP)) { |
2006 | || HAS_eDP || intel_pch_has_edp(crtc))) { | ||
2007 | /* Force use of hard-coded filter coefficients | 2027 | /* Force use of hard-coded filter coefficients |
2008 | * as some pre-programmed values are broken, | 2028 | * as some pre-programmed values are broken, |
2009 | * e.g. x201. | 2029 | * e.g. x201. |
@@ -2022,7 +2042,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2022 | if ((temp & PIPECONF_ENABLE) == 0) { | 2042 | if ((temp & PIPECONF_ENABLE) == 0) { |
2023 | I915_WRITE(reg, temp | PIPECONF_ENABLE); | 2043 | I915_WRITE(reg, temp | PIPECONF_ENABLE); |
2024 | POSTING_READ(reg); | 2044 | POSTING_READ(reg); |
2025 | udelay(100); | 2045 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
2026 | } | 2046 | } |
2027 | 2047 | ||
2028 | /* configure and enable CPU plane */ | 2048 | /* configure and enable CPU plane */ |
@@ -2067,28 +2087,6 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2067 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); | 2087 | I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); |
2068 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); | 2088 | I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); |
2069 | 2089 | ||
2070 | /* enable normal train */ | ||
2071 | reg = FDI_TX_CTL(pipe); | ||
2072 | temp = I915_READ(reg); | ||
2073 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2074 | temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE; | ||
2075 | I915_WRITE(reg, temp); | ||
2076 | |||
2077 | reg = FDI_RX_CTL(pipe); | ||
2078 | temp = I915_READ(reg); | ||
2079 | if (HAS_PCH_CPT(dev)) { | ||
2080 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | ||
2081 | temp |= FDI_LINK_TRAIN_NORMAL_CPT; | ||
2082 | } else { | ||
2083 | temp &= ~FDI_LINK_TRAIN_NONE; | ||
2084 | temp |= FDI_LINK_TRAIN_NONE; | ||
2085 | } | ||
2086 | I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE); | ||
2087 | |||
2088 | /* wait one idle pattern time */ | ||
2089 | POSTING_READ(reg); | ||
2090 | udelay(100); | ||
2091 | |||
2092 | /* For PCH DP, enable TRANS_DP_CTL */ | 2090 | /* For PCH DP, enable TRANS_DP_CTL */ |
2093 | if (HAS_PCH_CPT(dev) && | 2091 | if (HAS_PCH_CPT(dev) && |
2094 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { | 2092 | intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { |
@@ -2134,7 +2132,7 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2134 | temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; | 2132 | temp |= I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK; |
2135 | I915_WRITE(reg, temp | TRANS_ENABLE); | 2133 | I915_WRITE(reg, temp | TRANS_ENABLE); |
2136 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) | 2134 | if (wait_for(I915_READ(reg) & TRANS_STATE_ENABLE, 100)) |
2137 | DRM_ERROR("failed to enable transcoder\n"); | 2135 | DRM_ERROR("failed to enable transcoder %d\n", pipe); |
2138 | 2136 | ||
2139 | intel_crtc_load_lut(crtc); | 2137 | intel_crtc_load_lut(crtc); |
2140 | intel_update_fbc(dev); | 2138 | intel_update_fbc(dev); |
@@ -2174,9 +2172,9 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
2174 | temp = I915_READ(reg); | 2172 | temp = I915_READ(reg); |
2175 | if (temp & PIPECONF_ENABLE) { | 2173 | if (temp & PIPECONF_ENABLE) { |
2176 | I915_WRITE(reg, temp & ~PIPECONF_ENABLE); | 2174 | I915_WRITE(reg, temp & ~PIPECONF_ENABLE); |
2175 | POSTING_READ(reg); | ||
2177 | /* wait for cpu pipe off, pipe state */ | 2176 | /* wait for cpu pipe off, pipe state */ |
2178 | if (wait_for((I915_READ(reg) & I965_PIPECONF_ACTIVE) == 0, 50)) | 2177 | intel_wait_for_pipe_off(dev, intel_crtc->pipe); |
2179 | DRM_ERROR("failed to turn off cpu pipe\n"); | ||
2180 | } | 2178 | } |
2181 | 2179 | ||
2182 | /* Disable PF */ | 2180 | /* Disable PF */ |
@@ -2198,6 +2196,11 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
2198 | POSTING_READ(reg); | 2196 | POSTING_READ(reg); |
2199 | udelay(100); | 2197 | udelay(100); |
2200 | 2198 | ||
2199 | /* Ironlake workaround, disable clock pointer after downing FDI */ | ||
2200 | I915_WRITE(FDI_RX_CHICKEN(pipe), | ||
2201 | I915_READ(FDI_RX_CHICKEN(pipe) & | ||
2202 | ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); | ||
2203 | |||
2201 | /* still set train pattern 1 */ | 2204 | /* still set train pattern 1 */ |
2202 | reg = FDI_TX_CTL(pipe); | 2205 | reg = FDI_TX_CTL(pipe); |
2203 | temp = I915_READ(reg); | 2206 | temp = I915_READ(reg); |
@@ -3623,7 +3626,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3623 | refclk / 1000); | 3626 | refclk / 1000); |
3624 | } else if (!IS_GEN2(dev)) { | 3627 | } else if (!IS_GEN2(dev)) { |
3625 | refclk = 96000; | 3628 | refclk = 96000; |
3626 | if (HAS_PCH_SPLIT(dev)) | 3629 | if (HAS_PCH_SPLIT(dev) && |
3630 | (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base))) | ||
3627 | refclk = 120000; /* 120Mhz refclk */ | 3631 | refclk = 120000; /* 120Mhz refclk */ |
3628 | } else { | 3632 | } else { |
3629 | refclk = 48000; | 3633 | refclk = 48000; |
@@ -3685,16 +3689,16 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3685 | /* FDI link */ | 3689 | /* FDI link */ |
3686 | if (HAS_PCH_SPLIT(dev)) { | 3690 | if (HAS_PCH_SPLIT(dev)) { |
3687 | int lane = 0, link_bw, bpp; | 3691 | int lane = 0, link_bw, bpp; |
3688 | /* eDP doesn't require FDI link, so just set DP M/N | 3692 | /* CPU eDP doesn't require FDI link, so just set DP M/N |
3689 | according to current link config */ | 3693 | according to current link config */ |
3690 | if (has_edp_encoder) { | 3694 | if (has_edp_encoder && !intel_encoder_is_pch_edp(&encoder->base)) { |
3691 | target_clock = mode->clock; | 3695 | target_clock = mode->clock; |
3692 | intel_edp_link_config(has_edp_encoder, | 3696 | intel_edp_link_config(has_edp_encoder, |
3693 | &lane, &link_bw); | 3697 | &lane, &link_bw); |
3694 | } else { | 3698 | } else { |
3695 | /* DP over FDI requires target mode clock | 3699 | /* [e]DP over FDI requires target mode clock |
3696 | instead of link clock */ | 3700 | instead of link clock */ |
3697 | if (is_dp) | 3701 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) |
3698 | target_clock = mode->clock; | 3702 | target_clock = mode->clock; |
3699 | else | 3703 | else |
3700 | target_clock = adjusted_mode->clock; | 3704 | target_clock = adjusted_mode->clock; |
@@ -3718,7 +3722,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3718 | temp |= PIPE_8BPC; | 3722 | temp |= PIPE_8BPC; |
3719 | else | 3723 | else |
3720 | temp |= PIPE_6BPC; | 3724 | temp |= PIPE_6BPC; |
3721 | } else if (has_edp_encoder || (is_dp && intel_pch_has_edp(crtc))) { | 3725 | } else if (has_edp_encoder) { |
3722 | switch (dev_priv->edp.bpp/3) { | 3726 | switch (dev_priv->edp.bpp/3) { |
3723 | case 8: | 3727 | case 8: |
3724 | temp |= PIPE_8BPC; | 3728 | temp |= PIPE_8BPC; |
@@ -3794,13 +3798,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3794 | 3798 | ||
3795 | POSTING_READ(PCH_DREF_CONTROL); | 3799 | POSTING_READ(PCH_DREF_CONTROL); |
3796 | udelay(200); | 3800 | udelay(200); |
3801 | } | ||
3802 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
3797 | 3803 | ||
3798 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | 3804 | /* Enable CPU source on CPU attached eDP */ |
3799 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | 3805 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
3806 | if (dev_priv->lvds_use_ssc) | ||
3807 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | ||
3808 | else | ||
3809 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | ||
3800 | } else { | 3810 | } else { |
3801 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | 3811 | /* Enable SSC on PCH eDP if needed */ |
3812 | if (dev_priv->lvds_use_ssc) { | ||
3813 | DRM_ERROR("enabling SSC on PCH\n"); | ||
3814 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | ||
3815 | } | ||
3802 | } | 3816 | } |
3803 | I915_WRITE(PCH_DREF_CONTROL, temp); | 3817 | I915_WRITE(PCH_DREF_CONTROL, temp); |
3818 | POSTING_READ(PCH_DREF_CONTROL); | ||
3819 | udelay(200); | ||
3804 | } | 3820 | } |
3805 | } | 3821 | } |
3806 | 3822 | ||
@@ -3835,7 +3851,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3835 | } | 3851 | } |
3836 | dpll |= DPLL_DVO_HIGH_SPEED; | 3852 | dpll |= DPLL_DVO_HIGH_SPEED; |
3837 | } | 3853 | } |
3838 | if (is_dp) | 3854 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) |
3839 | dpll |= DPLL_DVO_HIGH_SPEED; | 3855 | dpll |= DPLL_DVO_HIGH_SPEED; |
3840 | 3856 | ||
3841 | /* compute bitmask from p1 value */ | 3857 | /* compute bitmask from p1 value */ |
@@ -3934,7 +3950,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3934 | dpll_reg = DPLL(pipe); | 3950 | dpll_reg = DPLL(pipe); |
3935 | } | 3951 | } |
3936 | 3952 | ||
3937 | if (!has_edp_encoder) { | 3953 | /* PCH eDP needs FDI, but CPU eDP does not */ |
3954 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | ||
3938 | I915_WRITE(fp_reg, fp); | 3955 | I915_WRITE(fp_reg, fp); |
3939 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); | 3956 | I915_WRITE(dpll_reg, dpll & ~DPLL_VCO_ENABLE); |
3940 | 3957 | ||
@@ -4011,9 +4028,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4011 | } | 4028 | } |
4012 | } | 4029 | } |
4013 | 4030 | ||
4014 | if (is_dp) | 4031 | if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
4015 | intel_dp_set_m_n(crtc, mode, adjusted_mode); | 4032 | intel_dp_set_m_n(crtc, mode, adjusted_mode); |
4016 | else if (HAS_PCH_SPLIT(dev)) { | 4033 | } else if (HAS_PCH_SPLIT(dev)) { |
4017 | /* For non-DP output, clear any trans DP clock recovery setting.*/ | 4034 | /* For non-DP output, clear any trans DP clock recovery setting.*/ |
4018 | if (pipe == 0) { | 4035 | if (pipe == 0) { |
4019 | I915_WRITE(TRANSA_DATA_M1, 0); | 4036 | I915_WRITE(TRANSA_DATA_M1, 0); |
@@ -4028,7 +4045,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4028 | } | 4045 | } |
4029 | } | 4046 | } |
4030 | 4047 | ||
4031 | if (!has_edp_encoder) { | 4048 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
4032 | I915_WRITE(fp_reg, fp); | 4049 | I915_WRITE(fp_reg, fp); |
4033 | I915_WRITE(dpll_reg, dpll); | 4050 | I915_WRITE(dpll_reg, dpll); |
4034 | 4051 | ||
@@ -4122,29 +4139,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
4122 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); | 4139 | I915_WRITE(PIPE_LINK_M1(pipe), m_n.link_m); |
4123 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); | 4140 | I915_WRITE(PIPE_LINK_N1(pipe), m_n.link_n); |
4124 | 4141 | ||
4125 | if (has_edp_encoder) { | 4142 | if (has_edp_encoder && !intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
4126 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); | 4143 | ironlake_set_pll_edp(crtc, adjusted_mode->clock); |
4127 | } else { | ||
4128 | /* enable FDI RX PLL too */ | ||
4129 | reg = FDI_RX_CTL(pipe); | ||
4130 | temp = I915_READ(reg); | ||
4131 | I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE); | ||
4132 | |||
4133 | POSTING_READ(reg); | ||
4134 | udelay(200); | ||
4135 | |||
4136 | /* enable FDI TX PLL too */ | ||
4137 | reg = FDI_TX_CTL(pipe); | ||
4138 | temp = I915_READ(reg); | ||
4139 | I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE); | ||
4140 | |||
4141 | /* enable FDI RX PCDCLK */ | ||
4142 | reg = FDI_RX_CTL(pipe); | ||
4143 | temp = I915_READ(reg); | ||
4144 | I915_WRITE(reg, temp | FDI_PCDCLK); | ||
4145 | |||
4146 | POSTING_READ(reg); | ||
4147 | udelay(200); | ||
4148 | } | 4144 | } |
4149 | } | 4145 | } |
4150 | 4146 | ||
@@ -4995,8 +4991,9 @@ static void do_intel_finish_page_flip(struct drm_device *dev, | |||
4995 | obj_priv = to_intel_bo(work->pending_flip_obj); | 4991 | obj_priv = to_intel_bo(work->pending_flip_obj); |
4996 | 4992 | ||
4997 | /* Initial scanout buffer will have a 0 pending flip count */ | 4993 | /* Initial scanout buffer will have a 0 pending flip count */ |
4998 | if ((atomic_read(&obj_priv->pending_flip) == 0) || | 4994 | atomic_clear_mask(1 << intel_crtc->plane, |
4999 | atomic_dec_and_test(&obj_priv->pending_flip)) | 4995 | &obj_priv->pending_flip.counter); |
4996 | if (atomic_read(&obj_priv->pending_flip) == 0) | ||
5000 | wake_up(&dev_priv->pending_flip_queue); | 4997 | wake_up(&dev_priv->pending_flip_queue); |
5001 | schedule_work(&work->work); | 4998 | schedule_work(&work->work); |
5002 | 4999 | ||
@@ -5093,7 +5090,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, | |||
5093 | goto cleanup_objs; | 5090 | goto cleanup_objs; |
5094 | 5091 | ||
5095 | obj_priv = to_intel_bo(obj); | 5092 | obj_priv = to_intel_bo(obj); |
5096 | atomic_inc(&obj_priv->pending_flip); | 5093 | atomic_add(1 << intel_crtc->plane, &obj_priv->pending_flip); |
5097 | work->pending_flip_obj = obj; | 5094 | work->pending_flip_obj = obj; |
5098 | 5095 | ||
5099 | if (IS_GEN3(dev) || IS_GEN2(dev)) { | 5096 | if (IS_GEN3(dev) || IS_GEN2(dev)) { |
@@ -5750,6 +5747,13 @@ void intel_init_clock_gating(struct drm_device *dev) | |||
5750 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); | 5747 | I915_WRITE(PCH_DSPCLK_GATE_D, dspclk_gate); |
5751 | 5748 | ||
5752 | /* | 5749 | /* |
5750 | * On Ibex Peak and Cougar Point, we need to disable clock | ||
5751 | * gating for the panel power sequencer or it will fail to | ||
5752 | * start up when no ports are active. | ||
5753 | */ | ||
5754 | I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); | ||
5755 | |||
5756 | /* | ||
5753 | * According to the spec the following bits should be set in | 5757 | * According to the spec the following bits should be set in |
5754 | * order to enable memory self-refresh | 5758 | * order to enable memory self-refresh |
5755 | * The bit 22/21 of 0x42004 | 5759 | * The bit 22/21 of 0x42004 |
@@ -6131,6 +6135,9 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
6131 | drm_kms_helper_poll_fini(dev); | 6135 | drm_kms_helper_poll_fini(dev); |
6132 | mutex_lock(&dev->struct_mutex); | 6136 | mutex_lock(&dev->struct_mutex); |
6133 | 6137 | ||
6138 | intel_unregister_dsm_handler(); | ||
6139 | |||
6140 | |||
6134 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { | 6141 | list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
6135 | /* Skip inactive CRTCs */ | 6142 | /* Skip inactive CRTCs */ |
6136 | if (!crtc->fb) | 6143 | if (!crtc->fb) |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 152d94507b79..128c2fefd541 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -42,9 +42,6 @@ | |||
42 | 42 | ||
43 | #define DP_LINK_CONFIGURATION_SIZE 9 | 43 | #define DP_LINK_CONFIGURATION_SIZE 9 |
44 | 44 | ||
45 | #define IS_eDP(i) ((i)->base.type == INTEL_OUTPUT_EDP) | ||
46 | #define IS_PCH_eDP(i) ((i)->is_pch_edp) | ||
47 | |||
48 | struct intel_dp { | 45 | struct intel_dp { |
49 | struct intel_encoder base; | 46 | struct intel_encoder base; |
50 | uint32_t output_reg; | 47 | uint32_t output_reg; |
@@ -62,6 +59,31 @@ struct intel_dp { | |||
62 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 59 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
63 | }; | 60 | }; |
64 | 61 | ||
62 | /** | ||
63 | * is_edp - is the given port attached to an eDP panel (either CPU or PCH) | ||
64 | * @intel_dp: DP struct | ||
65 | * | ||
66 | * If a CPU or PCH DP output is attached to an eDP panel, this function | ||
67 | * will return true, and false otherwise. | ||
68 | */ | ||
69 | static bool is_edp(struct intel_dp *intel_dp) | ||
70 | { | ||
71 | return intel_dp->base.type == INTEL_OUTPUT_EDP; | ||
72 | } | ||
73 | |||
74 | /** | ||
75 | * is_pch_edp - is the port on the PCH and attached to an eDP panel? | ||
76 | * @intel_dp: DP struct | ||
77 | * | ||
78 | * Returns true if the given DP struct corresponds to a PCH DP port attached | ||
79 | * to an eDP panel, false otherwise. Helpful for determining whether we | ||
80 | * may need FDI resources for a given DP output or not. | ||
81 | */ | ||
82 | static bool is_pch_edp(struct intel_dp *intel_dp) | ||
83 | { | ||
84 | return intel_dp->is_pch_edp; | ||
85 | } | ||
86 | |||
65 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) | 87 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) |
66 | { | 88 | { |
67 | return container_of(encoder, struct intel_dp, base.base); | 89 | return container_of(encoder, struct intel_dp, base.base); |
@@ -73,6 +95,25 @@ static struct intel_dp *intel_attached_dp(struct drm_connector *connector) | |||
73 | struct intel_dp, base); | 95 | struct intel_dp, base); |
74 | } | 96 | } |
75 | 97 | ||
98 | /** | ||
99 | * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP? | ||
100 | * @encoder: DRM encoder | ||
101 | * | ||
102 | * Return true if @encoder corresponds to a PCH attached eDP panel. Needed | ||
103 | * by intel_display.c. | ||
104 | */ | ||
105 | bool intel_encoder_is_pch_edp(struct drm_encoder *encoder) | ||
106 | { | ||
107 | struct intel_dp *intel_dp; | ||
108 | |||
109 | if (!encoder) | ||
110 | return false; | ||
111 | |||
112 | intel_dp = enc_to_intel_dp(encoder); | ||
113 | |||
114 | return is_pch_edp(intel_dp); | ||
115 | } | ||
116 | |||
76 | static void intel_dp_start_link_train(struct intel_dp *intel_dp); | 117 | static void intel_dp_start_link_train(struct intel_dp *intel_dp); |
77 | static void intel_dp_complete_link_train(struct intel_dp *intel_dp); | 118 | static void intel_dp_complete_link_train(struct intel_dp *intel_dp); |
78 | static void intel_dp_link_down(struct intel_dp *intel_dp); | 119 | static void intel_dp_link_down(struct intel_dp *intel_dp); |
@@ -138,7 +179,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi | |||
138 | { | 179 | { |
139 | struct drm_i915_private *dev_priv = dev->dev_private; | 180 | struct drm_i915_private *dev_priv = dev->dev_private; |
140 | 181 | ||
141 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 182 | if (is_edp(intel_dp)) |
142 | return (pixel_clock * dev_priv->edp.bpp + 7) / 8; | 183 | return (pixel_clock * dev_priv->edp.bpp + 7) / 8; |
143 | else | 184 | else |
144 | return pixel_clock * 3; | 185 | return pixel_clock * 3; |
@@ -160,8 +201,7 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
160 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); | 201 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); |
161 | int max_lanes = intel_dp_max_lane_count(intel_dp); | 202 | int max_lanes = intel_dp_max_lane_count(intel_dp); |
162 | 203 | ||
163 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && | 204 | if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { |
164 | dev_priv->panel_fixed_mode) { | ||
165 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) | 205 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) |
166 | return MODE_PANEL; | 206 | return MODE_PANEL; |
167 | 207 | ||
@@ -171,7 +211,7 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
171 | 211 | ||
172 | /* only refuse the mode on non eDP since we have seen some wierd eDP panels | 212 | /* only refuse the mode on non eDP since we have seen some wierd eDP panels |
173 | which are outside spec tolerances but somehow work by magic */ | 213 | which are outside spec tolerances but somehow work by magic */ |
174 | if (!IS_eDP(intel_dp) && | 214 | if (!is_edp(intel_dp) && |
175 | (intel_dp_link_required(connector->dev, intel_dp, mode->clock) | 215 | (intel_dp_link_required(connector->dev, intel_dp, mode->clock) |
176 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) | 216 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) |
177 | return MODE_CLOCK_HIGH; | 217 | return MODE_CLOCK_HIGH; |
@@ -258,7 +298,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
258 | * Note that PCH attached eDP panels should use a 125MHz input | 298 | * Note that PCH attached eDP panels should use a 125MHz input |
259 | * clock divider. | 299 | * clock divider. |
260 | */ | 300 | */ |
261 | if (IS_eDP(intel_dp) && !IS_PCH_eDP(intel_dp)) { | 301 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { |
262 | if (IS_GEN6(dev)) | 302 | if (IS_GEN6(dev)) |
263 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ | 303 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ |
264 | else | 304 | else |
@@ -530,8 +570,7 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
530 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; | 570 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; |
531 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 571 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
532 | 572 | ||
533 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && | 573 | if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { |
534 | dev_priv->panel_fixed_mode) { | ||
535 | intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); | 574 | intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); |
536 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, | 575 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, |
537 | mode, adjusted_mode); | 576 | mode, adjusted_mode); |
@@ -542,6 +581,17 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
542 | mode->clock = dev_priv->panel_fixed_mode->clock; | 581 | mode->clock = dev_priv->panel_fixed_mode->clock; |
543 | } | 582 | } |
544 | 583 | ||
584 | /* Just use VBT values for eDP */ | ||
585 | if (is_edp(intel_dp)) { | ||
586 | intel_dp->lane_count = dev_priv->edp.lanes; | ||
587 | intel_dp->link_bw = dev_priv->edp.rate; | ||
588 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); | ||
589 | DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n", | ||
590 | intel_dp->link_bw, intel_dp->lane_count, | ||
591 | adjusted_mode->clock); | ||
592 | return true; | ||
593 | } | ||
594 | |||
545 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 595 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
546 | for (clock = 0; clock <= max_clock; clock++) { | 596 | for (clock = 0; clock <= max_clock; clock++) { |
547 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); | 597 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
@@ -560,19 +610,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
560 | } | 610 | } |
561 | } | 611 | } |
562 | 612 | ||
563 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { | ||
564 | /* okay we failed just pick the highest */ | ||
565 | intel_dp->lane_count = max_lane_count; | ||
566 | intel_dp->link_bw = bws[max_clock]; | ||
567 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); | ||
568 | DRM_DEBUG_KMS("Force picking display port link bw %02x lane " | ||
569 | "count %d clock %d\n", | ||
570 | intel_dp->link_bw, intel_dp->lane_count, | ||
571 | adjusted_mode->clock); | ||
572 | |||
573 | return true; | ||
574 | } | ||
575 | |||
576 | return false; | 613 | return false; |
577 | } | 614 | } |
578 | 615 | ||
@@ -609,25 +646,6 @@ intel_dp_compute_m_n(int bpp, | |||
609 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); | 646 | intel_reduce_ratio(&m_n->link_m, &m_n->link_n); |
610 | } | 647 | } |
611 | 648 | ||
612 | bool intel_pch_has_edp(struct drm_crtc *crtc) | ||
613 | { | ||
614 | struct drm_device *dev = crtc->dev; | ||
615 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
616 | struct drm_encoder *encoder; | ||
617 | |||
618 | list_for_each_entry(encoder, &mode_config->encoder_list, head) { | ||
619 | struct intel_dp *intel_dp; | ||
620 | |||
621 | if (encoder->crtc != crtc) | ||
622 | continue; | ||
623 | |||
624 | intel_dp = enc_to_intel_dp(encoder); | ||
625 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) | ||
626 | return intel_dp->is_pch_edp; | ||
627 | } | ||
628 | return false; | ||
629 | } | ||
630 | |||
631 | void | 649 | void |
632 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 650 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
633 | struct drm_display_mode *adjusted_mode) | 651 | struct drm_display_mode *adjusted_mode) |
@@ -652,8 +670,10 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
652 | intel_dp = enc_to_intel_dp(encoder); | 670 | intel_dp = enc_to_intel_dp(encoder); |
653 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { | 671 | if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { |
654 | lane_count = intel_dp->lane_count; | 672 | lane_count = intel_dp->lane_count; |
655 | if (IS_PCH_eDP(intel_dp)) | 673 | break; |
656 | bpp = dev_priv->edp.bpp; | 674 | } else if (is_edp(intel_dp)) { |
675 | lane_count = dev_priv->edp.lanes; | ||
676 | bpp = dev_priv->edp.bpp; | ||
657 | break; | 677 | break; |
658 | } | 678 | } |
659 | } | 679 | } |
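With intel_pch_has_edp() gone, intel_dp_set_m_n() now also takes the eDP lane count and bits-per-pixel from the VBT-derived edp block. The M/N pairs themselves are just reduced bandwidth ratios: link M/N is pixel clock over link symbol clock, and data M/N is stream payload over total link payload. The register packing (TU size and so on) is driver-specific, so the snippet below is only a user-space sketch of the arithmetic for a hypothetical 1080p60 panel driven at 24 bpp over two 2.7 GHz lanes:

    #include <stdio.h>

    static unsigned int gcd(unsigned int a, unsigned int b)
    {
            while (b) {
                    unsigned int t = a % b;
                    a = b;
                    b = t;
            }
            return a;
    }

    int main(void)
    {
            unsigned int pixel_khz = 148500;   /* 1920x1080 at 60 Hz */
            unsigned int link_khz  = 270000;   /* 2.7 GHz link symbol clock */
            unsigned int lanes = 2, bpp = 24;
            unsigned int m, n, g;

            /* link M/N: pixel clock over link clock */
            m = pixel_khz;
            n = link_khz;
            g = gcd(m, n);
            printf("link M/N = %u/%u\n", m / g, n / g);   /* 11/20 */

            /* data M/N: payload bytes per second over link bytes per second */
            m = pixel_khz * (bpp / 8);
            n = link_khz * lanes;
            g = gcd(m, n);
            printf("data M/N = %u/%u\n", m / g, n / g);   /* 33/40 */
            return 0;
    }
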
@@ -720,7 +740,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
720 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 740 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
721 | intel_dp->DP |= DP_SYNC_VS_HIGH; | 741 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
722 | 742 | ||
723 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 743 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
724 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; | 744 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
725 | else | 745 | else |
726 | intel_dp->DP |= DP_LINK_TRAIN_OFF; | 746 | intel_dp->DP |= DP_LINK_TRAIN_OFF; |
@@ -755,7 +775,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
755 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) | 775 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) |
756 | intel_dp->DP |= DP_PIPEB_SELECT; | 776 | intel_dp->DP |= DP_PIPEB_SELECT; |
757 | 777 | ||
758 | if (IS_eDP(intel_dp)) { | 778 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { |
759 | /* don't miss out required setting for eDP */ | 779 | /* don't miss out required setting for eDP */ |
760 | intel_dp->DP |= DP_PLL_ENABLE; | 780 | intel_dp->DP |= DP_PLL_ENABLE; |
761 | if (adjusted_mode->clock < 200000) | 781 | if (adjusted_mode->clock < 200000) |
@@ -766,10 +786,11 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
766 | } | 786 | } |
767 | 787 | ||
768 | /* Returns true if the panel was already on when called */ | 788 | /* Returns true if the panel was already on when called */ |
769 | static bool ironlake_edp_panel_on (struct drm_device *dev) | 789 | static bool ironlake_edp_panel_on (struct intel_dp *intel_dp) |
770 | { | 790 | { |
791 | struct drm_device *dev = intel_dp->base.base.dev; | ||
771 | struct drm_i915_private *dev_priv = dev->dev_private; | 792 | struct drm_i915_private *dev_priv = dev->dev_private; |
772 | u32 pp; | 793 | u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; |
773 | 794 | ||
774 | if (I915_READ(PCH_PP_STATUS) & PP_ON) | 795 | if (I915_READ(PCH_PP_STATUS) & PP_ON) |
775 | return true; | 796 | return true; |
@@ -781,19 +802,20 @@ static bool ironlake_edp_panel_on (struct drm_device *dev) | |||
781 | I915_WRITE(PCH_PP_CONTROL, pp); | 802 | I915_WRITE(PCH_PP_CONTROL, pp); |
782 | POSTING_READ(PCH_PP_CONTROL); | 803 | POSTING_READ(PCH_PP_CONTROL); |
783 | 804 | ||
784 | pp |= POWER_TARGET_ON; | 805 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; |
785 | I915_WRITE(PCH_PP_CONTROL, pp); | 806 | I915_WRITE(PCH_PP_CONTROL, pp); |
807 | POSTING_READ(PCH_PP_CONTROL); | ||
786 | 808 | ||
787 | /* Ouch. We need to wait here for some panels, like Dell e6510 | 809 | /* Ouch. We need to wait here for some panels, like Dell e6510 |
788 | * https://bugs.freedesktop.org/show_bug.cgi?id=29278i | 810 | * https://bugs.freedesktop.org/show_bug.cgi?id=29278i |
789 | */ | 811 | */ |
790 | msleep(300); | 812 | msleep(300); |
791 | 813 | ||
792 | if (wait_for(I915_READ(PCH_PP_STATUS) & PP_ON, 5000)) | 814 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask, |
815 | 5000)) | ||
793 | DRM_ERROR("panel on wait timed out: 0x%08x\n", | 816 | DRM_ERROR("panel on wait timed out: 0x%08x\n", |
794 | I915_READ(PCH_PP_STATUS)); | 817 | I915_READ(PCH_PP_STATUS)); |
795 | 818 | ||
796 | pp &= ~(PANEL_UNLOCK_REGS); | ||
797 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 819 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
798 | I915_WRITE(PCH_PP_CONTROL, pp); | 820 | I915_WRITE(PCH_PP_CONTROL, pp); |
799 | POSTING_READ(PCH_PP_CONTROL); | 821 | POSTING_READ(PCH_PP_CONTROL); |
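Two things change in the panel-on path: the register-unlock key is written together with POWER_TARGET_ON (and is no longer cleared afterwards), and the wait now requires both PP_ON and the power sequencer's on-idle state rather than PP_ON alone. The wait_for() used here is i915's polling helper from intel_drv.h; roughly, it behaves like the user-space sketch below (the real macro is jiffies-based, so treat this as an approximation of the idea, not the implementation):

    #include <errno.h>
    #include <unistd.h>

    /* Poll COND about once a millisecond until it is true or timeout_ms
     * elapses; evaluates to 0 on success and -ETIMEDOUT on timeout. */
    #define wait_for_sketch(COND, timeout_ms) ({            \
            int ms_left__ = (timeout_ms);                   \
            int ret__ = 0;                                  \
            while (!(COND)) {                               \
                    if (ms_left__-- <= 0) {                 \
                            ret__ = -ETIMEDOUT;             \
                            break;                          \
                    }                                       \
                    usleep(1000);                           \
            }                                               \
            ret__;                                          \
    })
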
@@ -804,7 +826,8 @@ static bool ironlake_edp_panel_on (struct drm_device *dev) | |||
804 | static void ironlake_edp_panel_off (struct drm_device *dev) | 826 | static void ironlake_edp_panel_off (struct drm_device *dev) |
805 | { | 827 | { |
806 | struct drm_i915_private *dev_priv = dev->dev_private; | 828 | struct drm_i915_private *dev_priv = dev->dev_private; |
807 | u32 pp; | 829 | u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | |
830 | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; | ||
808 | 831 | ||
809 | pp = I915_READ(PCH_PP_CONTROL); | 832 | pp = I915_READ(PCH_PP_CONTROL); |
810 | 833 | ||
@@ -815,12 +838,12 @@ static void ironlake_edp_panel_off (struct drm_device *dev) | |||
815 | 838 | ||
816 | pp &= ~POWER_TARGET_ON; | 839 | pp &= ~POWER_TARGET_ON; |
817 | I915_WRITE(PCH_PP_CONTROL, pp); | 840 | I915_WRITE(PCH_PP_CONTROL, pp); |
841 | POSTING_READ(PCH_PP_CONTROL); | ||
818 | 842 | ||
819 | if (wait_for((I915_READ(PCH_PP_STATUS) & PP_ON) == 0, 5000)) | 843 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) |
820 | DRM_ERROR("panel off wait timed out: 0x%08x\n", | 844 | DRM_ERROR("panel off wait timed out: 0x%08x\n", |
821 | I915_READ(PCH_PP_STATUS)); | 845 | I915_READ(PCH_PP_STATUS)); |
822 | 846 | ||
823 | /* Make sure VDD is enabled so DP AUX will work */ | ||
824 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 847 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
825 | I915_WRITE(PCH_PP_CONTROL, pp); | 848 | I915_WRITE(PCH_PP_CONTROL, pp); |
826 | POSTING_READ(PCH_PP_CONTROL); | 849 | POSTING_READ(PCH_PP_CONTROL); |
@@ -831,36 +854,19 @@ static void ironlake_edp_panel_off (struct drm_device *dev) | |||
831 | msleep(300); | 854 | msleep(300); |
832 | } | 855 | } |
833 | 856 | ||
834 | static void ironlake_edp_panel_vdd_on(struct drm_device *dev) | ||
835 | { | ||
836 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
837 | u32 pp; | ||
838 | |||
839 | pp = I915_READ(PCH_PP_CONTROL); | ||
840 | pp |= EDP_FORCE_VDD; | ||
841 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
842 | POSTING_READ(PCH_PP_CONTROL); | ||
843 | msleep(300); | ||
844 | } | ||
845 | |||
846 | static void ironlake_edp_panel_vdd_off(struct drm_device *dev) | ||
847 | { | ||
848 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
849 | u32 pp; | ||
850 | |||
851 | pp = I915_READ(PCH_PP_CONTROL); | ||
852 | pp &= ~EDP_FORCE_VDD; | ||
853 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
854 | POSTING_READ(PCH_PP_CONTROL); | ||
855 | msleep(300); | ||
856 | } | ||
857 | |||
858 | static void ironlake_edp_backlight_on (struct drm_device *dev) | 857 | static void ironlake_edp_backlight_on (struct drm_device *dev) |
859 | { | 858 | { |
860 | struct drm_i915_private *dev_priv = dev->dev_private; | 859 | struct drm_i915_private *dev_priv = dev->dev_private; |
861 | u32 pp; | 860 | u32 pp; |
862 | 861 | ||
863 | DRM_DEBUG_KMS("\n"); | 862 | DRM_DEBUG_KMS("\n"); |
863 | /* | ||
864 | * If we enable the backlight right away following a panel power | ||
865 | * on, we may see slight flicker as the panel syncs with the eDP | ||
866 | * link. So delay a bit to make sure the image is solid before | ||
867 | * allowing it to appear. | ||
868 | */ | ||
869 | msleep(300); | ||
864 | pp = I915_READ(PCH_PP_CONTROL); | 870 | pp = I915_READ(PCH_PP_CONTROL); |
865 | pp |= EDP_BLC_ENABLE; | 871 | pp |= EDP_BLC_ENABLE; |
866 | I915_WRITE(PCH_PP_CONTROL, pp); | 872 | I915_WRITE(PCH_PP_CONTROL, pp); |
@@ -885,8 +891,10 @@ static void ironlake_edp_pll_on(struct drm_encoder *encoder) | |||
885 | 891 | ||
886 | DRM_DEBUG_KMS("\n"); | 892 | DRM_DEBUG_KMS("\n"); |
887 | dpa_ctl = I915_READ(DP_A); | 893 | dpa_ctl = I915_READ(DP_A); |
888 | dpa_ctl &= ~DP_PLL_ENABLE; | 894 | dpa_ctl |= DP_PLL_ENABLE; |
889 | I915_WRITE(DP_A, dpa_ctl); | 895 | I915_WRITE(DP_A, dpa_ctl); |
896 | POSTING_READ(DP_A); | ||
897 | udelay(200); | ||
890 | } | 898 | } |
891 | 899 | ||
892 | static void ironlake_edp_pll_off(struct drm_encoder *encoder) | 900 | static void ironlake_edp_pll_off(struct drm_encoder *encoder) |
@@ -896,7 +904,7 @@ static void ironlake_edp_pll_off(struct drm_encoder *encoder) | |||
896 | u32 dpa_ctl; | 904 | u32 dpa_ctl; |
897 | 905 | ||
898 | dpa_ctl = I915_READ(DP_A); | 906 | dpa_ctl = I915_READ(DP_A); |
899 | dpa_ctl |= DP_PLL_ENABLE; | 907 | dpa_ctl &= ~DP_PLL_ENABLE; |
900 | I915_WRITE(DP_A, dpa_ctl); | 908 | I915_WRITE(DP_A, dpa_ctl); |
901 | POSTING_READ(DP_A); | 909 | POSTING_READ(DP_A); |
902 | udelay(200); | 910 | udelay(200); |
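Worth noting: before this patch ironlake_edp_pll_on() cleared DP_PLL_ENABLE and ironlake_edp_pll_off() set it, so the two hunks above swap the bit operations back to what the function names say and add a posting read plus a short settle delay on the enable side. Reduced to its essentials (I915_READ/I915_WRITE/POSTING_READ stand in for the driver's MMIO helpers):

    /* Sketch of the corrected pair; only the bit direction and the
     * flush/settle on enable matter here. */
    static void edp_pll_on_sketch(void)
    {
            u32 dpa_ctl = I915_READ(DP_A);

            dpa_ctl |= DP_PLL_ENABLE;       /* enable the eDP PLL */
            I915_WRITE(DP_A, dpa_ctl);
            POSTING_READ(DP_A);             /* flush the write */
            udelay(200);                    /* let the PLL settle */
    }

    static void edp_pll_off_sketch(void)
    {
            u32 dpa_ctl = I915_READ(DP_A);

            dpa_ctl &= ~DP_PLL_ENABLE;      /* disable the eDP PLL */
            I915_WRITE(DP_A, dpa_ctl);
            POSTING_READ(DP_A);
            udelay(200);
    }
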
@@ -909,11 +917,13 @@ static void intel_dp_prepare(struct drm_encoder *encoder) | |||
909 | struct drm_i915_private *dev_priv = dev->dev_private; | 917 | struct drm_i915_private *dev_priv = dev->dev_private; |
910 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | 918 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); |
911 | 919 | ||
912 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { | 920 | if (is_edp(intel_dp)) { |
913 | ironlake_edp_panel_off(dev); | ||
914 | ironlake_edp_backlight_off(dev); | 921 | ironlake_edp_backlight_off(dev); |
915 | ironlake_edp_panel_vdd_on(dev); | 922 | ironlake_edp_panel_on(intel_dp); |
916 | ironlake_edp_pll_on(encoder); | 923 | if (!is_pch_edp(intel_dp)) |
924 | ironlake_edp_pll_on(encoder); | ||
925 | else | ||
926 | ironlake_edp_pll_off(encoder); | ||
917 | } | 927 | } |
918 | if (dp_reg & DP_PORT_EN) | 928 | if (dp_reg & DP_PORT_EN) |
919 | intel_dp_link_down(intel_dp); | 929 | intel_dp_link_down(intel_dp); |
@@ -926,14 +936,13 @@ static void intel_dp_commit(struct drm_encoder *encoder) | |||
926 | 936 | ||
927 | intel_dp_start_link_train(intel_dp); | 937 | intel_dp_start_link_train(intel_dp); |
928 | 938 | ||
929 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 939 | if (is_edp(intel_dp)) |
930 | ironlake_edp_panel_on(dev); | 940 | ironlake_edp_panel_on(intel_dp); |
931 | 941 | ||
932 | intel_dp_complete_link_train(intel_dp); | 942 | intel_dp_complete_link_train(intel_dp); |
933 | 943 | ||
934 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 944 | if (is_edp(intel_dp)) |
935 | ironlake_edp_backlight_on(dev); | 945 | ironlake_edp_backlight_on(dev); |
936 | intel_dp->dpms_mode = DRM_MODE_DPMS_ON; | ||
937 | } | 946 | } |
938 | 947 | ||
939 | static void | 948 | static void |
@@ -945,21 +954,21 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
945 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | 954 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); |
946 | 955 | ||
947 | if (mode != DRM_MODE_DPMS_ON) { | 956 | if (mode != DRM_MODE_DPMS_ON) { |
948 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { | 957 | if (is_edp(intel_dp)) |
949 | ironlake_edp_backlight_off(dev); | 958 | ironlake_edp_backlight_off(dev); |
950 | ironlake_edp_panel_off(dev); | ||
951 | } | ||
952 | if (dp_reg & DP_PORT_EN) | 959 | if (dp_reg & DP_PORT_EN) |
953 | intel_dp_link_down(intel_dp); | 960 | intel_dp_link_down(intel_dp); |
954 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 961 | if (is_edp(intel_dp)) |
962 | ironlake_edp_panel_off(dev); | ||
963 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) | ||
955 | ironlake_edp_pll_off(encoder); | 964 | ironlake_edp_pll_off(encoder); |
956 | } else { | 965 | } else { |
957 | if (!(dp_reg & DP_PORT_EN)) { | 966 | if (!(dp_reg & DP_PORT_EN)) { |
967 | if (is_edp(intel_dp)) | ||
968 | ironlake_edp_panel_on(intel_dp); | ||
958 | intel_dp_start_link_train(intel_dp); | 969 | intel_dp_start_link_train(intel_dp); |
959 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | ||
960 | ironlake_edp_panel_on(dev); | ||
961 | intel_dp_complete_link_train(intel_dp); | 970 | intel_dp_complete_link_train(intel_dp); |
962 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 971 | if (is_edp(intel_dp)) |
963 | ironlake_edp_backlight_on(dev); | 972 | ironlake_edp_backlight_on(dev); |
964 | } | 973 | } |
965 | } | 974 | } |
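Across intel_dp_prepare(), intel_dp_commit() and the DPMS hook, the eDP sequencing is reordered so the panel is powered before the link is trained and is among the last things shut off, with the CPU-side PLL only touched for non-PCH eDP. Condensed, the ordering the DPMS hunk establishes is (driver function names, bodies elided):

    /* DPMS off: backlight -> link -> panel -> PLL (CPU eDP only) */
    ironlake_edp_backlight_off(dev);
    intel_dp_link_down(intel_dp);
    ironlake_edp_panel_off(dev);
    if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
            ironlake_edp_pll_off(encoder);

    /* DPMS on: panel -> link training -> backlight */
    ironlake_edp_panel_on(intel_dp);
    intel_dp_start_link_train(intel_dp);
    intel_dp_complete_link_train(intel_dp);
    ironlake_edp_backlight_on(dev);
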
@@ -1079,11 +1088,21 @@ intel_get_adjust_train(struct intel_dp *intel_dp) | |||
1079 | } | 1088 | } |
1080 | 1089 | ||
1081 | static uint32_t | 1090 | static uint32_t |
1082 | intel_dp_signal_levels(uint8_t train_set, int lane_count) | 1091 | intel_dp_signal_levels(struct intel_dp *intel_dp) |
1083 | { | 1092 | { |
1084 | uint32_t signal_levels = 0; | 1093 | struct drm_device *dev = intel_dp->base.base.dev; |
1094 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1095 | uint32_t signal_levels = 0; | ||
1096 | u8 train_set = intel_dp->train_set[0]; | ||
1097 | u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
1098 | u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK; | ||
1099 | |||
1100 | if (is_edp(intel_dp)) { | ||
1101 | vswing = dev_priv->edp.vswing; | ||
1102 | preemphasis = dev_priv->edp.preemphasis; | ||
1103 | } | ||
1085 | 1104 | ||
1086 | switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { | 1105 | switch (vswing) { |
1087 | case DP_TRAIN_VOLTAGE_SWING_400: | 1106 | case DP_TRAIN_VOLTAGE_SWING_400: |
1088 | default: | 1107 | default: |
1089 | signal_levels |= DP_VOLTAGE_0_4; | 1108 | signal_levels |= DP_VOLTAGE_0_4; |
@@ -1098,7 +1117,7 @@ intel_dp_signal_levels(uint8_t train_set, int lane_count) | |||
1098 | signal_levels |= DP_VOLTAGE_1_2; | 1117 | signal_levels |= DP_VOLTAGE_1_2; |
1099 | break; | 1118 | break; |
1100 | } | 1119 | } |
1101 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { | 1120 | switch (preemphasis) { |
1102 | case DP_TRAIN_PRE_EMPHASIS_0: | 1121 | case DP_TRAIN_PRE_EMPHASIS_0: |
1103 | default: | 1122 | default: |
1104 | signal_levels |= DP_PRE_EMPHASIS_0; | 1123 | signal_levels |= DP_PRE_EMPHASIS_0; |
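intel_dp_signal_levels() now takes the voltage swing and pre-emphasis either from the first training-set byte or, for eDP, from the VBT-provided values. The fields it masks are the standard DPCD per-lane training fields; for reference, the relevant drm_dp_helper.h definitions are:

    /* DPCD TRAINING_LANEx_SET field layout */
    #define DP_TRAIN_VOLTAGE_SWING_MASK   0x03
    #define DP_TRAIN_VOLTAGE_SWING_400    (0 << 0)    /* 0.4 V  */
    #define DP_TRAIN_VOLTAGE_SWING_600    (1 << 0)    /* 0.6 V  */
    #define DP_TRAIN_VOLTAGE_SWING_800    (2 << 0)    /* 0.8 V  */
    #define DP_TRAIN_VOLTAGE_SWING_1200   (3 << 0)    /* 1.2 V  */
    #define DP_TRAIN_MAX_SWING_REACHED    (1 << 2)

    #define DP_TRAIN_PRE_EMPHASIS_MASK    (3 << 3)
    #define DP_TRAIN_PRE_EMPHASIS_0       (0 << 3)    /* 0 dB   */
    #define DP_TRAIN_PRE_EMPHASIS_3_5     (1 << 3)    /* 3.5 dB */
    #define DP_TRAIN_PRE_EMPHASIS_6       (2 << 3)    /* 6 dB   */
    #define DP_TRAIN_PRE_EMPHASIS_9_5     (3 << 3)    /* 9.5 dB */
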
@@ -1185,6 +1204,18 @@ intel_channel_eq_ok(struct intel_dp *intel_dp) | |||
1185 | } | 1204 | } |
1186 | 1205 | ||
1187 | static bool | 1206 | static bool |
1207 | intel_dp_aux_handshake_required(struct intel_dp *intel_dp) | ||
1208 | { | ||
1209 | struct drm_device *dev = intel_dp->base.base.dev; | ||
1210 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1211 | |||
1212 | if (is_edp(intel_dp) && dev_priv->no_aux_handshake) | ||
1213 | return false; | ||
1214 | |||
1215 | return true; | ||
1216 | } | ||
1217 | |||
1218 | static bool | ||
1188 | intel_dp_set_link_train(struct intel_dp *intel_dp, | 1219 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
1189 | uint32_t dp_reg_value, | 1220 | uint32_t dp_reg_value, |
1190 | uint8_t dp_train_pat) | 1221 | uint8_t dp_train_pat) |
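The new intel_dp_aux_handshake_required() keys off dev_priv->no_aux_handshake, which the init code later in this patch latches from DPCD byte 3. That capability bit is part of the standard receiver-capability block, defined in drm_dp_helper.h as:

    #define DP_MAX_DOWNSPREAD                   0x003   /* receiver caps, byte 3 */
    # define DP_NO_AUX_HANDSHAKE_LINK_TRAINING  (1 << 6)

A panel advertising the bit can be driven with fixed, pre-programmed link parameters (here, the VBT values), which is why the training paths below can return early instead of walking the DPCD handshake.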
@@ -1196,6 +1227,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
1196 | I915_WRITE(intel_dp->output_reg, dp_reg_value); | 1227 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
1197 | POSTING_READ(intel_dp->output_reg); | 1228 | POSTING_READ(intel_dp->output_reg); |
1198 | 1229 | ||
1230 | if (!intel_dp_aux_handshake_required(intel_dp)) | ||
1231 | return true; | ||
1232 | |||
1199 | intel_dp_aux_native_write_1(intel_dp, | 1233 | intel_dp_aux_native_write_1(intel_dp, |
1200 | DP_TRAINING_PATTERN_SET, | 1234 | DP_TRAINING_PATTERN_SET, |
1201 | dp_train_pat); | 1235 | dp_train_pat); |
@@ -1228,13 +1262,14 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1228 | POSTING_READ(intel_dp->output_reg); | 1262 | POSTING_READ(intel_dp->output_reg); |
1229 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1263 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1230 | 1264 | ||
1231 | /* Write the link configuration data */ | 1265 | if (intel_dp_aux_handshake_required(intel_dp)) |
1232 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 1266 | /* Write the link configuration data */ |
1233 | intel_dp->link_configuration, | 1267 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
1234 | DP_LINK_CONFIGURATION_SIZE); | 1268 | intel_dp->link_configuration, |
1269 | DP_LINK_CONFIGURATION_SIZE); | ||
1235 | 1270 | ||
1236 | DP |= DP_PORT_EN; | 1271 | DP |= DP_PORT_EN; |
1237 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1272 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1238 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1273 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1239 | else | 1274 | else |
1240 | DP &= ~DP_LINK_TRAIN_MASK; | 1275 | DP &= ~DP_LINK_TRAIN_MASK; |
@@ -1245,15 +1280,15 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1245 | for (;;) { | 1280 | for (;;) { |
1246 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ | 1281 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
1247 | uint32_t signal_levels; | 1282 | uint32_t signal_levels; |
1248 | if (IS_GEN6(dev) && IS_eDP(intel_dp)) { | 1283 | if (IS_GEN6(dev) && is_edp(intel_dp)) { |
1249 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | 1284 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1250 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1285 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1251 | } else { | 1286 | } else { |
1252 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); | 1287 | signal_levels = intel_dp_signal_levels(intel_dp); |
1253 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1288 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1254 | } | 1289 | } |
1255 | 1290 | ||
1256 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1291 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1257 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; | 1292 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; |
1258 | else | 1293 | else |
1259 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1294 | reg = DP | DP_LINK_TRAIN_PAT_1; |
@@ -1263,33 +1298,37 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1263 | break; | 1298 | break; |
1264 | /* Set training pattern 1 */ | 1299 | /* Set training pattern 1 */ |
1265 | 1300 | ||
1266 | udelay(100); | 1301 | udelay(500); |
1267 | if (!intel_dp_get_link_status(intel_dp)) | 1302 | if (intel_dp_aux_handshake_required(intel_dp)) { |
1268 | break; | 1303 | break; |
1304 | } else { | ||
1305 | if (!intel_dp_get_link_status(intel_dp)) | ||
1306 | break; | ||
1269 | 1307 | ||
1270 | if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { | 1308 | if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { |
1271 | clock_recovery = true; | 1309 | clock_recovery = true; |
1272 | break; | ||
1273 | } | ||
1274 | |||
1275 | /* Check to see if we've tried the max voltage */ | ||
1276 | for (i = 0; i < intel_dp->lane_count; i++) | ||
1277 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | ||
1278 | break; | 1310 | break; |
1279 | if (i == intel_dp->lane_count) | 1311 | } |
1280 | break; | ||
1281 | 1312 | ||
1282 | /* Check to see if we've tried the same voltage 5 times */ | 1313 | /* Check to see if we've tried the max voltage */ |
1283 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | 1314 | for (i = 0; i < intel_dp->lane_count; i++) |
1284 | ++tries; | 1315 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
1285 | if (tries == 5) | 1316 | break; |
1317 | if (i == intel_dp->lane_count) | ||
1286 | break; | 1318 | break; |
1287 | } else | ||
1288 | tries = 0; | ||
1289 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
1290 | 1319 | ||
1291 | /* Compute new intel_dp->train_set as requested by target */ | 1320 | /* Check to see if we've tried the same voltage 5 times */ |
1292 | intel_get_adjust_train(intel_dp); | 1321 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { |
1322 | ++tries; | ||
1323 | if (tries == 5) | ||
1324 | break; | ||
1325 | } else | ||
1326 | tries = 0; | ||
1327 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
1328 | |||
1329 | /* Compute new intel_dp->train_set as requested by target */ | ||
1330 | intel_get_adjust_train(intel_dp); | ||
1331 | } | ||
1293 | } | 1332 | } |
1294 | 1333 | ||
1295 | intel_dp->DP = DP; | 1334 | intel_dp->DP = DP; |
@@ -1312,15 +1351,15 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1312 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ | 1351 | /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ |
1313 | uint32_t signal_levels; | 1352 | uint32_t signal_levels; |
1314 | 1353 | ||
1315 | if (IS_GEN6(dev) && IS_eDP(intel_dp)) { | 1354 | if (IS_GEN6(dev) && is_edp(intel_dp)) { |
1316 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | 1355 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1317 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1356 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1318 | } else { | 1357 | } else { |
1319 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); | 1358 | signal_levels = intel_dp_signal_levels(intel_dp); |
1320 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1359 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1321 | } | 1360 | } |
1322 | 1361 | ||
1323 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1362 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1324 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; | 1363 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; |
1325 | else | 1364 | else |
1326 | reg = DP | DP_LINK_TRAIN_PAT_2; | 1365 | reg = DP | DP_LINK_TRAIN_PAT_2; |
@@ -1330,25 +1369,29 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1330 | DP_TRAINING_PATTERN_2)) | 1369 | DP_TRAINING_PATTERN_2)) |
1331 | break; | 1370 | break; |
1332 | 1371 | ||
1333 | udelay(400); | 1372 | udelay(500); |
1334 | if (!intel_dp_get_link_status(intel_dp)) | ||
1335 | break; | ||
1336 | 1373 | ||
1337 | if (intel_channel_eq_ok(intel_dp)) { | 1374 | if (!intel_dp_aux_handshake_required(intel_dp)) { |
1338 | channel_eq = true; | ||
1339 | break; | 1375 | break; |
1340 | } | 1376 | } else { |
1377 | if (!intel_dp_get_link_status(intel_dp)) | ||
1378 | break; | ||
1341 | 1379 | ||
1342 | /* Try 5 times */ | 1380 | if (intel_channel_eq_ok(intel_dp)) { |
1343 | if (tries > 5) | 1381 | channel_eq = true; |
1344 | break; | 1382 | break; |
1383 | } | ||
1345 | 1384 | ||
1346 | /* Compute new intel_dp->train_set as requested by target */ | 1385 | /* Try 5 times */ |
1347 | intel_get_adjust_train(intel_dp); | 1386 | if (tries > 5) |
1348 | ++tries; | 1387 | break; |
1349 | } | ||
1350 | 1388 | ||
1351 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) | 1389 | /* Compute new intel_dp->train_set as requested by target */ |
1390 | intel_get_adjust_train(intel_dp); | ||
1391 | ++tries; | ||
1392 | } | ||
1393 | } | ||
1394 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | ||
1352 | reg = DP | DP_LINK_TRAIN_OFF_CPT; | 1395 | reg = DP | DP_LINK_TRAIN_OFF_CPT; |
1353 | else | 1396 | else |
1354 | reg = DP | DP_LINK_TRAIN_OFF; | 1397 | reg = DP | DP_LINK_TRAIN_OFF; |
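Taken together, the two restructured loops keep the usual two-phase DisplayPort training flow, with the whole DPCD dialogue gated on intel_dp_aux_handshake_required(): phase 1 drives training pattern 1 and polls for clock recovery, raising the voltage swing as the sink asks (giving up at maximum swing or after five tries at one level); phase 2 drives training pattern 2 and polls for channel equalisation, bounded to five adjustment rounds. A compressed outline of the flow, following the channel-EQ hunk's form of the gate, with driver helpers as stand-ins and register writes elided:

    for (;;) {      /* phase 1: clock recovery, training pattern 1 */
            /* program signal levels from intel_dp->train_set[0] ... */
            if (!intel_dp_set_link_train(intel_dp, reg, DP_TRAINING_PATTERN_1))
                    break;
            if (!intel_dp_aux_handshake_required(intel_dp))
                    break;          /* panel trains without the handshake */
            if (!intel_dp_get_link_status(intel_dp))
                    break;
            if (intel_clock_recovery_ok(intel_dp->link_status,
                                        intel_dp->lane_count))
                    break;
            /* bump swing / pre-emphasis via intel_get_adjust_train(),
             * giving up at max swing or after 5 tries at one voltage */
    }

    for (;;) {      /* phase 2: channel equalisation, training pattern 2 */
            if (!intel_dp_set_link_train(intel_dp, reg, DP_TRAINING_PATTERN_2))
                    break;
            if (!intel_dp_aux_handshake_required(intel_dp))
                    break;
            if (!intel_dp_get_link_status(intel_dp))
                    break;
            if (intel_channel_eq_ok(intel_dp) || ++tries > 5)
                    break;
            intel_get_adjust_train(intel_dp);
    }
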
@@ -1368,14 +1411,14 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1368 | 1411 | ||
1369 | DRM_DEBUG_KMS("\n"); | 1412 | DRM_DEBUG_KMS("\n"); |
1370 | 1413 | ||
1371 | if (IS_eDP(intel_dp)) { | 1414 | if (is_edp(intel_dp)) { |
1372 | DP &= ~DP_PLL_ENABLE; | 1415 | DP &= ~DP_PLL_ENABLE; |
1373 | I915_WRITE(intel_dp->output_reg, DP); | 1416 | I915_WRITE(intel_dp->output_reg, DP); |
1374 | POSTING_READ(intel_dp->output_reg); | 1417 | POSTING_READ(intel_dp->output_reg); |
1375 | udelay(100); | 1418 | udelay(100); |
1376 | } | 1419 | } |
1377 | 1420 | ||
1378 | if (HAS_PCH_CPT(dev) && !IS_eDP(intel_dp)) { | 1421 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) { |
1379 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1422 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1380 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); | 1423 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); |
1381 | } else { | 1424 | } else { |
@@ -1386,7 +1429,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1386 | 1429 | ||
1387 | msleep(17); | 1430 | msleep(17); |
1388 | 1431 | ||
1389 | if (IS_eDP(intel_dp)) | 1432 | if (is_edp(intel_dp)) |
1390 | DP |= DP_LINK_TRAIN_OFF; | 1433 | DP |= DP_LINK_TRAIN_OFF; |
1391 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); | 1434 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); |
1392 | POSTING_READ(intel_dp->output_reg); | 1435 | POSTING_READ(intel_dp->output_reg); |
@@ -1424,9 +1467,10 @@ ironlake_dp_detect(struct drm_connector *connector) | |||
1424 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 1467 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
1425 | enum drm_connector_status status; | 1468 | enum drm_connector_status status; |
1426 | 1469 | ||
1427 | /* Panel needs power for AUX to work */ | 1470 | /* Can't disconnect eDP */ |
1428 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | 1471 | if (is_edp(intel_dp)) |
1429 | ironlake_edp_panel_vdd_on(connector->dev); | 1472 | return connector_status_connected; |
1473 | |||
1430 | status = connector_status_disconnected; | 1474 | status = connector_status_disconnected; |
1431 | if (intel_dp_aux_native_read(intel_dp, | 1475 | if (intel_dp_aux_native_read(intel_dp, |
1432 | 0x000, intel_dp->dpcd, | 1476 | 0x000, intel_dp->dpcd, |
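Since eDP is now reported as always connected, the DPCD probe above only matters for external DP ports: a sink that answers the AUX native read of its first capability bytes with a non-zero DPCD revision is treated as present. Those offsets are the start of the standard receiver-capability block (names as in drm_dp_helper.h):

    #define DP_DPCD_REV        0x000   /* e.g. 0x10 = DP 1.0, 0x11 = DP 1.1 */
    #define DP_MAX_LINK_RATE   0x001   /* link-rate code, e.g. DP_LINK_BW_2_7 */
    #define DP_MAX_LANE_COUNT  0x002   /* low bits give 1, 2 or 4 lanes */
    #define DP_MAX_DOWNSPREAD  0x003   /* includes the no-AUX-handshake bit */
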
@@ -1437,8 +1481,6 @@ ironlake_dp_detect(struct drm_connector *connector) | |||
1437 | } | 1481 | } |
1438 | DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], | 1482 | DRM_DEBUG_KMS("DPCD: %hx%hx%hx%hx\n", intel_dp->dpcd[0], |
1439 | intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); | 1483 | intel_dp->dpcd[1], intel_dp->dpcd[2], intel_dp->dpcd[3]); |
1440 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) | ||
1441 | ironlake_edp_panel_vdd_off(connector->dev); | ||
1442 | return status; | 1484 | return status; |
1443 | } | 1485 | } |
1444 | 1486 | ||
@@ -1504,8 +1546,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1504 | 1546 | ||
1505 | ret = intel_ddc_get_modes(connector, &intel_dp->adapter); | 1547 | ret = intel_ddc_get_modes(connector, &intel_dp->adapter); |
1506 | if (ret) { | 1548 | if (ret) { |
1507 | if ((IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) && | 1549 | if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) { |
1508 | !dev_priv->panel_fixed_mode) { | ||
1509 | struct drm_display_mode *newmode; | 1550 | struct drm_display_mode *newmode; |
1510 | list_for_each_entry(newmode, &connector->probed_modes, | 1551 | list_for_each_entry(newmode, &connector->probed_modes, |
1511 | head) { | 1552 | head) { |
@@ -1521,7 +1562,7 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1521 | } | 1562 | } |
1522 | 1563 | ||
1523 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 1564 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ |
1524 | if (IS_eDP(intel_dp) || IS_PCH_eDP(intel_dp)) { | 1565 | if (is_edp(intel_dp)) { |
1525 | if (dev_priv->panel_fixed_mode != NULL) { | 1566 | if (dev_priv->panel_fixed_mode != NULL) { |
1526 | struct drm_display_mode *mode; | 1567 | struct drm_display_mode *mode; |
1527 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 1568 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); |
@@ -1651,7 +1692,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1651 | if (intel_dpd_is_edp(dev)) | 1692 | if (intel_dpd_is_edp(dev)) |
1652 | intel_dp->is_pch_edp = true; | 1693 | intel_dp->is_pch_edp = true; |
1653 | 1694 | ||
1654 | if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { | 1695 | if (output_reg == DP_A || is_pch_edp(intel_dp)) { |
1655 | type = DRM_MODE_CONNECTOR_eDP; | 1696 | type = DRM_MODE_CONNECTOR_eDP; |
1656 | intel_encoder->type = INTEL_OUTPUT_EDP; | 1697 | intel_encoder->type = INTEL_OUTPUT_EDP; |
1657 | } else { | 1698 | } else { |
@@ -1672,7 +1713,7 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1672 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | 1713 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
1673 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 1714 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
1674 | 1715 | ||
1675 | if (IS_eDP(intel_dp)) | 1716 | if (is_edp(intel_dp)) |
1676 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 1717 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); |
1677 | 1718 | ||
1678 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 1719 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); |
@@ -1717,9 +1758,29 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1717 | 1758 | ||
1718 | intel_dp_i2c_init(intel_dp, intel_connector, name); | 1759 | intel_dp_i2c_init(intel_dp, intel_connector, name); |
1719 | 1760 | ||
1761 | /* Cache some DPCD data in the eDP case */ | ||
1762 | if (is_edp(intel_dp)) { | ||
1763 | int ret; | ||
1764 | bool was_on; | ||
1765 | |||
1766 | was_on = ironlake_edp_panel_on(intel_dp); | ||
1767 | ret = intel_dp_aux_native_read(intel_dp, DP_DPCD_REV, | ||
1768 | intel_dp->dpcd, | ||
1769 | sizeof(intel_dp->dpcd)); | ||
1770 | if (ret == sizeof(intel_dp->dpcd)) { | ||
1771 | if (intel_dp->dpcd[0] >= 0x11) | ||
1772 | dev_priv->no_aux_handshake = intel_dp->dpcd[3] & | ||
1773 | DP_NO_AUX_HANDSHAKE_LINK_TRAINING; | ||
1774 | } else { | ||
1775 | DRM_ERROR("failed to retrieve link info\n"); | ||
1776 | } | ||
1777 | if (!was_on) | ||
1778 | ironlake_edp_panel_off(dev); | ||
1779 | } | ||
1780 | |||
1720 | intel_encoder->hot_plug = intel_dp_hot_plug; | 1781 | intel_encoder->hot_plug = intel_dp_hot_plug; |
1721 | 1782 | ||
1722 | if (output_reg == DP_A || IS_PCH_eDP(intel_dp)) { | 1783 | if (is_edp(intel_dp)) { |
1723 | /* initialize panel mode from VBT if available for eDP */ | 1784 | /* initialize panel mode from VBT if available for eDP */ |
1724 | if (dev_priv->lfp_lvds_vbt_mode) { | 1785 | if (dev_priv->lfp_lvds_vbt_mode) { |
1725 | dev_priv->panel_fixed_mode = | 1786 | dev_priv->panel_fixed_mode = |
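To make the cached capability check concrete: for a hypothetical eDP panel whose first four DPCD bytes read back as 0x11, 0x0a, 0x84, 0x41, the code above would see DPCD 1.1, a 2.7 GHz maximum rate, four lanes, and bit 6 set in byte 3, so no_aux_handshake ends up true and the fast training path applies. A tiny user-space decode of that hypothetical read:

    #include <stdbool.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical receiver caps returned by the AUX read. */
            unsigned char dpcd[4] = { 0x11, 0x0a, 0x84, 0x41 };
            bool no_aux_handshake;

            /* Same test as the init code: DPCD 1.1+ and the
             * DP_NO_AUX_HANDSHAKE_LINK_TRAINING bit (1 << 6) in byte 3. */
            no_aux_handshake = dpcd[0] >= 0x11 && (dpcd[3] & (1 << 6));

            printf("DPCD %x.%x, rate code 0x%02x, %d lanes, no_aux_handshake=%d\n",
                   dpcd[0] >> 4, dpcd[0] & 0xf, dpcd[1], dpcd[2] & 0x1f,
                   no_aux_handshake);
            return 0;
    }
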
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 40e99bf27ff7..0581e5e5ac55 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -209,9 +209,9 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg); | |||
209 | void | 209 | void |
210 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 210 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
211 | struct drm_display_mode *adjusted_mode); | 211 | struct drm_display_mode *adjusted_mode); |
212 | extern bool intel_pch_has_edp(struct drm_crtc *crtc); | ||
213 | extern bool intel_dpd_is_edp(struct drm_device *dev); | 212 | extern bool intel_dpd_is_edp(struct drm_device *dev); |
214 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); | 213 | extern void intel_edp_link_config (struct intel_encoder *, int *, int *); |
214 | extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder); | ||
215 | 215 | ||
216 | /* intel_panel.c */ | 216 | /* intel_panel.c */ |
217 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, | 217 | extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode, |
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c index 521622b9be7a..af2a1dddc28e 100644 --- a/drivers/gpu/drm/i915/intel_fb.c +++ b/drivers/gpu/drm/i915/intel_fb.c | |||
@@ -225,7 +225,7 @@ static void intel_fbdev_destroy(struct drm_device *dev, | |||
225 | 225 | ||
226 | drm_framebuffer_cleanup(&ifb->base); | 226 | drm_framebuffer_cleanup(&ifb->base); |
227 | if (ifb->obj) { | 227 | if (ifb->obj) { |
228 | drm_gem_object_unreference(ifb->obj); | 228 | drm_gem_object_unreference_unlocked(ifb->obj); |
229 | ifb->obj = NULL; | 229 | ifb->obj = NULL; |
230 | } | 230 | } |
231 | } | 231 | } |
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index a49e791db0b0..83a389e44543 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
@@ -23,6 +23,9 @@ | |||
23 | #ifndef _DRM_DP_HELPER_H_ | 23 | #ifndef _DRM_DP_HELPER_H_ |
24 | #define _DRM_DP_HELPER_H_ | 24 | #define _DRM_DP_HELPER_H_ |
25 | 25 | ||
26 | #include <linux/types.h> | ||
27 | #include <linux/i2c.h> | ||
28 | |||
26 | /* From the VESA DisplayPort spec */ | 29 | /* From the VESA DisplayPort spec */ |
27 | 30 | ||
28 | #define AUX_NATIVE_WRITE 0x8 | 31 | #define AUX_NATIVE_WRITE 0x8 |