diff options
26 files changed, 1245 insertions, 432 deletions
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c index 85151019dde1..80a7ed0a7df5 100644 --- a/drivers/char/agp/intel-gtt.c +++ b/drivers/char/agp/intel-gtt.c | |||
@@ -923,6 +923,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem, | |||
923 | { | 923 | { |
924 | int ret = -EINVAL; | 924 | int ret = -EINVAL; |
925 | 925 | ||
926 | if (intel_private.base.do_idle_maps) | ||
927 | return -ENODEV; | ||
928 | |||
926 | if (intel_private.clear_fake_agp) { | 929 | if (intel_private.clear_fake_agp) { |
927 | int start = intel_private.base.stolen_size / PAGE_SIZE; | 930 | int start = intel_private.base.stolen_size / PAGE_SIZE; |
928 | int end = intel_private.base.gtt_mappable_entries; | 931 | int end = intel_private.base.gtt_mappable_entries; |
@@ -985,6 +988,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem, | |||
985 | if (mem->page_count == 0) | 988 | if (mem->page_count == 0) |
986 | return 0; | 989 | return 0; |
987 | 990 | ||
991 | if (intel_private.base.do_idle_maps) | ||
992 | return -ENODEV; | ||
993 | |||
988 | intel_gtt_clear_range(pg_start, mem->page_count); | 994 | intel_gtt_clear_range(pg_start, mem->page_count); |
989 | 995 | ||
990 | if (intel_private.base.needs_dmar) { | 996 | if (intel_private.base.needs_dmar) { |
@@ -1177,6 +1183,25 @@ static void gen6_cleanup(void) | |||
1177 | { | 1183 | { |
1178 | } | 1184 | } |
1179 | 1185 | ||
1186 | /* Certain Gen5 chipsets require idling the GPU before | ||
1187 | * unmapping anything from the GTT when VT-d is enabled. | ||
1188 | */ | ||
1189 | extern int intel_iommu_gfx_mapped; | ||
1190 | static inline int needs_idle_maps(void) | ||
1191 | { | ||
1192 | const unsigned short gpu_devid = intel_private.pcidev->device; | ||
1193 | |||
1194 | /* Query intel_iommu to see if we need the workaround. Presumably that | ||
1195 | * was loaded first. | ||
1196 | */ | ||
1197 | if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || | ||
1198 | gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) && | ||
1199 | intel_iommu_gfx_mapped) | ||
1200 | return 1; | ||
1201 | |||
1202 | return 0; | ||
1203 | } | ||
1204 | |||
1180 | static int i9xx_setup(void) | 1205 | static int i9xx_setup(void) |
1181 | { | 1206 | { |
1182 | u32 reg_addr; | 1207 | u32 reg_addr; |
@@ -1211,6 +1236,9 @@ static int i9xx_setup(void) | |||
1211 | intel_private.gtt_bus_addr = reg_addr + gtt_offset; | 1236 | intel_private.gtt_bus_addr = reg_addr + gtt_offset; |
1212 | } | 1237 | } |
1213 | 1238 | ||
1239 | if (needs_idle_maps()) | ||
1240 | intel_private.base.do_idle_maps = 1; | ||
1241 | |||
1214 | intel_i9xx_setup_flush(); | 1242 | intel_i9xx_setup_flush(); |
1215 | 1243 | ||
1216 | return 0; | 1244 | return 0; |
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c index d76da389f521..2eac955dee18 100644 --- a/drivers/gpu/drm/i915/i915_dma.c +++ b/drivers/gpu/drm/i915/i915_dma.c | |||
@@ -2035,7 +2035,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | |||
2035 | spin_lock_init(&dev_priv->error_lock); | 2035 | spin_lock_init(&dev_priv->error_lock); |
2036 | spin_lock_init(&dev_priv->rps_lock); | 2036 | spin_lock_init(&dev_priv->rps_lock); |
2037 | 2037 | ||
2038 | if (IS_MOBILE(dev) || !IS_GEN2(dev)) | 2038 | if (IS_IVYBRIDGE(dev)) |
2039 | dev_priv->num_pipe = 3; | ||
2040 | else if (IS_MOBILE(dev) || !IS_GEN2(dev)) | ||
2039 | dev_priv->num_pipe = 2; | 2041 | dev_priv->num_pipe = 2; |
2040 | else | 2042 | else |
2041 | dev_priv->num_pipe = 1; | 2043 | dev_priv->num_pipe = 1; |
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index c96b019a3b76..4c8d681c2151 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c | |||
@@ -79,11 +79,11 @@ MODULE_PARM_DESC(lvds_downclock, | |||
79 | "Use panel (LVDS/eDP) downclocking for power savings " | 79 | "Use panel (LVDS/eDP) downclocking for power savings " |
80 | "(default: false)"); | 80 | "(default: false)"); |
81 | 81 | ||
82 | unsigned int i915_panel_use_ssc __read_mostly = 1; | 82 | unsigned int i915_panel_use_ssc __read_mostly = -1; |
83 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); | 83 | module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); |
84 | MODULE_PARM_DESC(lvds_use_ssc, | 84 | MODULE_PARM_DESC(lvds_use_ssc, |
85 | "Use Spread Spectrum Clock with panels [LVDS/eDP] " | 85 | "Use Spread Spectrum Clock with panels [LVDS/eDP] " |
86 | "(default: true)"); | 86 | "(default: auto from VBT)"); |
87 | 87 | ||
88 | int i915_vbt_sdvo_panel_type __read_mostly = -1; | 88 | int i915_vbt_sdvo_panel_type __read_mostly = -1; |
89 | module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); | 89 | module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); |
@@ -471,6 +471,9 @@ static int i915_drm_thaw(struct drm_device *dev) | |||
471 | error = i915_gem_init_ringbuffer(dev); | 471 | error = i915_gem_init_ringbuffer(dev); |
472 | mutex_unlock(&dev->struct_mutex); | 472 | mutex_unlock(&dev->struct_mutex); |
473 | 473 | ||
474 | if (HAS_PCH_SPLIT(dev)) | ||
475 | ironlake_init_pch_refclk(dev); | ||
476 | |||
474 | drm_mode_config_reset(dev); | 477 | drm_mode_config_reset(dev); |
475 | drm_irq_install(dev); | 478 | drm_irq_install(dev); |
476 | 479 | ||
@@ -895,3 +898,43 @@ module_exit(i915_exit); | |||
895 | MODULE_AUTHOR(DRIVER_AUTHOR); | 898 | MODULE_AUTHOR(DRIVER_AUTHOR); |
896 | MODULE_DESCRIPTION(DRIVER_DESC); | 899 | MODULE_DESCRIPTION(DRIVER_DESC); |
897 | MODULE_LICENSE("GPL and additional rights"); | 900 | MODULE_LICENSE("GPL and additional rights"); |
901 | |||
902 | /* We give fast paths for the really cool registers */ | ||
903 | #define NEEDS_FORCE_WAKE(dev_priv, reg) \ | ||
904 | (((dev_priv)->info->gen >= 6) && \ | ||
905 | ((reg) < 0x40000) && \ | ||
906 | ((reg) != FORCEWAKE)) | ||
907 | |||
908 | #define __i915_read(x, y) \ | ||
909 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | ||
910 | u##x val = 0; \ | ||
911 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
912 | gen6_gt_force_wake_get(dev_priv); \ | ||
913 | val = read##y(dev_priv->regs + reg); \ | ||
914 | gen6_gt_force_wake_put(dev_priv); \ | ||
915 | } else { \ | ||
916 | val = read##y(dev_priv->regs + reg); \ | ||
917 | } \ | ||
918 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ | ||
919 | return val; \ | ||
920 | } | ||
921 | |||
922 | __i915_read(8, b) | ||
923 | __i915_read(16, w) | ||
924 | __i915_read(32, l) | ||
925 | __i915_read(64, q) | ||
926 | #undef __i915_read | ||
927 | |||
928 | #define __i915_write(x, y) \ | ||
929 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | ||
930 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ | ||
931 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
932 | __gen6_gt_wait_for_fifo(dev_priv); \ | ||
933 | } \ | ||
934 | write##y(val, dev_priv->regs + reg); \ | ||
935 | } | ||
936 | __i915_write(8, b) | ||
937 | __i915_write(16, w) | ||
938 | __i915_write(32, l) | ||
939 | __i915_write(64, q) | ||
940 | #undef __i915_write | ||
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 15c0ca58ad8b..06a37f4fd74b 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h | |||
@@ -139,7 +139,6 @@ struct sdvo_device_mapping { | |||
139 | u8 slave_addr; | 139 | u8 slave_addr; |
140 | u8 dvo_wiring; | 140 | u8 dvo_wiring; |
141 | u8 i2c_pin; | 141 | u8 i2c_pin; |
142 | u8 i2c_speed; | ||
143 | u8 ddc_pin; | 142 | u8 ddc_pin; |
144 | }; | 143 | }; |
145 | 144 | ||
@@ -349,7 +348,6 @@ typedef struct drm_i915_private { | |||
349 | /* LVDS info */ | 348 | /* LVDS info */ |
350 | int backlight_level; /* restore backlight to this value */ | 349 | int backlight_level; /* restore backlight to this value */ |
351 | bool backlight_enabled; | 350 | bool backlight_enabled; |
352 | struct drm_display_mode *panel_fixed_mode; | ||
353 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ | 351 | struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */ |
354 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ | 352 | struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */ |
355 | 353 | ||
@@ -359,6 +357,7 @@ typedef struct drm_i915_private { | |||
359 | unsigned int lvds_vbt:1; | 357 | unsigned int lvds_vbt:1; |
360 | unsigned int int_crt_support:1; | 358 | unsigned int int_crt_support:1; |
361 | unsigned int lvds_use_ssc:1; | 359 | unsigned int lvds_use_ssc:1; |
360 | unsigned int display_clock_mode:1; | ||
362 | int lvds_ssc_freq; | 361 | int lvds_ssc_freq; |
363 | struct { | 362 | struct { |
364 | int rate; | 363 | int rate; |
@@ -674,10 +673,9 @@ typedef struct drm_i915_private { | |||
674 | unsigned int lvds_border_bits; | 673 | unsigned int lvds_border_bits; |
675 | /* Panel fitter placement and size for Ironlake+ */ | 674 | /* Panel fitter placement and size for Ironlake+ */ |
676 | u32 pch_pf_pos, pch_pf_size; | 675 | u32 pch_pf_pos, pch_pf_size; |
677 | int panel_t3, panel_t12; | ||
678 | 676 | ||
679 | struct drm_crtc *plane_to_crtc_mapping[2]; | 677 | struct drm_crtc *plane_to_crtc_mapping[3]; |
680 | struct drm_crtc *pipe_to_crtc_mapping[2]; | 678 | struct drm_crtc *pipe_to_crtc_mapping[3]; |
681 | wait_queue_head_t pending_flip_queue; | 679 | wait_queue_head_t pending_flip_queue; |
682 | bool flip_pending_is_done; | 680 | bool flip_pending_is_done; |
683 | 681 | ||
@@ -1303,6 +1301,7 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state); | |||
1303 | extern bool intel_fbc_enabled(struct drm_device *dev); | 1301 | extern bool intel_fbc_enabled(struct drm_device *dev); |
1304 | extern void intel_disable_fbc(struct drm_device *dev); | 1302 | extern void intel_disable_fbc(struct drm_device *dev); |
1305 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); | 1303 | extern bool ironlake_set_drps(struct drm_device *dev, u8 val); |
1304 | extern void ironlake_init_pch_refclk(struct drm_device *dev); | ||
1306 | extern void ironlake_enable_rc6(struct drm_device *dev); | 1305 | extern void ironlake_enable_rc6(struct drm_device *dev); |
1307 | extern void gen6_set_rps(struct drm_device *dev, u8 val); | 1306 | extern void gen6_set_rps(struct drm_device *dev, u8 val); |
1308 | extern void intel_detect_pch(struct drm_device *dev); | 1307 | extern void intel_detect_pch(struct drm_device *dev); |
@@ -1356,18 +1355,7 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv); | |||
1356 | ((reg) != FORCEWAKE)) | 1355 | ((reg) != FORCEWAKE)) |
1357 | 1356 | ||
1358 | #define __i915_read(x, y) \ | 1357 | #define __i915_read(x, y) \ |
1359 | static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ | 1358 | u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg); |
1360 | u##x val = 0; \ | ||
1361 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
1362 | gen6_gt_force_wake_get(dev_priv); \ | ||
1363 | val = read##y(dev_priv->regs + reg); \ | ||
1364 | gen6_gt_force_wake_put(dev_priv); \ | ||
1365 | } else { \ | ||
1366 | val = read##y(dev_priv->regs + reg); \ | ||
1367 | } \ | ||
1368 | trace_i915_reg_rw(false, reg, val, sizeof(val)); \ | ||
1369 | return val; \ | ||
1370 | } | ||
1371 | 1359 | ||
1372 | __i915_read(8, b) | 1360 | __i915_read(8, b) |
1373 | __i915_read(16, w) | 1361 | __i915_read(16, w) |
@@ -1376,13 +1364,8 @@ __i915_read(64, q) | |||
1376 | #undef __i915_read | 1364 | #undef __i915_read |
1377 | 1365 | ||
1378 | #define __i915_write(x, y) \ | 1366 | #define __i915_write(x, y) \ |
1379 | static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ | 1367 | void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val); |
1380 | trace_i915_reg_rw(true, reg, val, sizeof(val)); \ | 1368 | |
1381 | if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ | ||
1382 | __gen6_gt_wait_for_fifo(dev_priv); \ | ||
1383 | } \ | ||
1384 | write##y(val, dev_priv->regs + reg); \ | ||
1385 | } | ||
1386 | __i915_write(8, b) | 1369 | __i915_write(8, b) |
1387 | __i915_write(16, w) | 1370 | __i915_write(16, w) |
1388 | __i915_write(32, l) | 1371 | __i915_write(32, l) |
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index f0f885f44b87..6651c36b6e8a 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c | |||
@@ -195,6 +195,8 @@ i915_gem_create(struct drm_file *file, | |||
195 | u32 handle; | 195 | u32 handle; |
196 | 196 | ||
197 | size = roundup(size, PAGE_SIZE); | 197 | size = roundup(size, PAGE_SIZE); |
198 | if (size == 0) | ||
199 | return -EINVAL; | ||
198 | 200 | ||
199 | /* Allocate the new object */ | 201 | /* Allocate the new object */ |
200 | obj = i915_gem_alloc_object(dev, size); | 202 | obj = i915_gem_alloc_object(dev, size); |
@@ -800,11 +802,11 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, | |||
800 | if (IS_ERR(page)) | 802 | if (IS_ERR(page)) |
801 | return PTR_ERR(page); | 803 | return PTR_ERR(page); |
802 | 804 | ||
803 | vaddr = kmap_atomic(page, KM_USER0); | 805 | vaddr = kmap_atomic(page); |
804 | ret = __copy_from_user_inatomic(vaddr + page_offset, | 806 | ret = __copy_from_user_inatomic(vaddr + page_offset, |
805 | user_data, | 807 | user_data, |
806 | page_length); | 808 | page_length); |
807 | kunmap_atomic(vaddr, KM_USER0); | 809 | kunmap_atomic(vaddr); |
808 | 810 | ||
809 | set_page_dirty(page); | 811 | set_page_dirty(page); |
810 | mark_page_accessed(page); | 812 | mark_page_accessed(page); |
@@ -1476,7 +1478,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj, | |||
1476 | obj->pages[i] = page; | 1478 | obj->pages[i] = page; |
1477 | } | 1479 | } |
1478 | 1480 | ||
1479 | if (obj->tiling_mode != I915_TILING_NONE) | 1481 | if (i915_gem_object_needs_bit17_swizzle(obj)) |
1480 | i915_gem_object_do_bit_17_swizzle(obj); | 1482 | i915_gem_object_do_bit_17_swizzle(obj); |
1481 | 1483 | ||
1482 | return 0; | 1484 | return 0; |
@@ -1498,7 +1500,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj) | |||
1498 | 1500 | ||
1499 | BUG_ON(obj->madv == __I915_MADV_PURGED); | 1501 | BUG_ON(obj->madv == __I915_MADV_PURGED); |
1500 | 1502 | ||
1501 | if (obj->tiling_mode != I915_TILING_NONE) | 1503 | if (i915_gem_object_needs_bit17_swizzle(obj)) |
1502 | i915_gem_object_save_bit_17_swizzle(obj); | 1504 | i915_gem_object_save_bit_17_swizzle(obj); |
1503 | 1505 | ||
1504 | if (obj->madv == I915_MADV_DONTNEED) | 1506 | if (obj->madv == I915_MADV_DONTNEED) |
@@ -2191,14 +2193,8 @@ int | |||
2191 | i915_gpu_idle(struct drm_device *dev) | 2193 | i915_gpu_idle(struct drm_device *dev) |
2192 | { | 2194 | { |
2193 | drm_i915_private_t *dev_priv = dev->dev_private; | 2195 | drm_i915_private_t *dev_priv = dev->dev_private; |
2194 | bool lists_empty; | ||
2195 | int ret, i; | 2196 | int ret, i; |
2196 | 2197 | ||
2197 | lists_empty = (list_empty(&dev_priv->mm.flushing_list) && | ||
2198 | list_empty(&dev_priv->mm.active_list)); | ||
2199 | if (lists_empty) | ||
2200 | return 0; | ||
2201 | |||
2202 | /* Flush everything onto the inactive list. */ | 2198 | /* Flush everything onto the inactive list. */ |
2203 | for (i = 0; i < I915_NUM_RINGS; i++) { | 2199 | for (i = 0; i < I915_NUM_RINGS; i++) { |
2204 | ret = i915_ring_idle(&dev_priv->ring[i]); | 2200 | ret = i915_ring_idle(&dev_priv->ring[i]); |
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c index ac908757e176..cc93cac242d6 100644 --- a/drivers/gpu/drm/i915/i915_gem_debug.c +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | |||
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) | |||
157 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { | 157 | for (page = 0; page < obj->size / PAGE_SIZE; page++) { |
158 | int i; | 158 | int i; |
159 | 159 | ||
160 | backing_map = kmap_atomic(obj->pages[page], KM_USER0); | 160 | backing_map = kmap_atomic(obj->pages[page]); |
161 | 161 | ||
162 | if (backing_map == NULL) { | 162 | if (backing_map == NULL) { |
163 | DRM_ERROR("failed to map backing page\n"); | 163 | DRM_ERROR("failed to map backing page\n"); |
@@ -181,13 +181,13 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle) | |||
181 | } | 181 | } |
182 | } | 182 | } |
183 | } | 183 | } |
184 | kunmap_atomic(backing_map, KM_USER0); | 184 | kunmap_atomic(backing_map); |
185 | backing_map = NULL; | 185 | backing_map = NULL; |
186 | } | 186 | } |
187 | 187 | ||
188 | out: | 188 | out: |
189 | if (backing_map != NULL) | 189 | if (backing_map != NULL) |
190 | kunmap_atomic(backing_map, KM_USER0); | 190 | kunmap_atomic(backing_map); |
191 | iounmap(gtt_mapping); | 191 | iounmap(gtt_mapping); |
192 | 192 | ||
193 | /* give syslog time to catch up */ | 193 | /* give syslog time to catch up */ |
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index 7a709cd8d543..6042c5e6d278 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c | |||
@@ -49,6 +49,28 @@ static unsigned int cache_level_to_agp_type(struct drm_device *dev, | |||
49 | } | 49 | } |
50 | } | 50 | } |
51 | 51 | ||
52 | static bool do_idling(struct drm_i915_private *dev_priv) | ||
53 | { | ||
54 | bool ret = dev_priv->mm.interruptible; | ||
55 | |||
56 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) { | ||
57 | dev_priv->mm.interruptible = false; | ||
58 | if (i915_gpu_idle(dev_priv->dev)) { | ||
59 | DRM_ERROR("Couldn't idle GPU\n"); | ||
60 | /* Wait a bit, in hopes it avoids the hang */ | ||
61 | udelay(10); | ||
62 | } | ||
63 | } | ||
64 | |||
65 | return ret; | ||
66 | } | ||
67 | |||
68 | static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible) | ||
69 | { | ||
70 | if (unlikely(dev_priv->mm.gtt->do_idle_maps)) | ||
71 | dev_priv->mm.interruptible = interruptible; | ||
72 | } | ||
73 | |||
52 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) | 74 | void i915_gem_restore_gtt_mappings(struct drm_device *dev) |
53 | { | 75 | { |
54 | struct drm_i915_private *dev_priv = dev->dev_private; | 76 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -117,6 +139,12 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj, | |||
117 | 139 | ||
118 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) | 140 | void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) |
119 | { | 141 | { |
142 | struct drm_device *dev = obj->base.dev; | ||
143 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
144 | bool interruptible; | ||
145 | |||
146 | interruptible = do_idling(dev_priv); | ||
147 | |||
120 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, | 148 | intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT, |
121 | obj->base.size >> PAGE_SHIFT); | 149 | obj->base.size >> PAGE_SHIFT); |
122 | 150 | ||
@@ -124,4 +152,6 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj) | |||
124 | intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); | 152 | intel_gtt_unmap_memory(obj->sg_list, obj->num_sg); |
125 | obj->sg_list = NULL; | 153 | obj->sg_list = NULL; |
126 | } | 154 | } |
155 | |||
156 | undo_idling(dev_priv, interruptible); | ||
127 | } | 157 | } |
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c index 99c4faa59d8f..31d334d9d9da 100644 --- a/drivers/gpu/drm/i915/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | |||
@@ -92,7 +92,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | |||
92 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | 92 | uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; |
93 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | 93 | uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; |
94 | 94 | ||
95 | if (INTEL_INFO(dev)->gen >= 5) { | 95 | if (INTEL_INFO(dev)->gen >= 6) { |
96 | swizzle_x = I915_BIT_6_SWIZZLE_NONE; | ||
97 | swizzle_y = I915_BIT_6_SWIZZLE_NONE; | ||
98 | } else if (IS_GEN5(dev)) { | ||
96 | /* On Ironlake whatever DRAM config, GPU always do | 99 | /* On Ironlake whatever DRAM config, GPU always do |
97 | * same swizzling setup. | 100 | * same swizzling setup. |
98 | */ | 101 | */ |
@@ -440,14 +443,9 @@ i915_gem_swizzle_page(struct page *page) | |||
440 | void | 443 | void |
441 | i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) | 444 | i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) |
442 | { | 445 | { |
443 | struct drm_device *dev = obj->base.dev; | ||
444 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
445 | int page_count = obj->base.size >> PAGE_SHIFT; | 446 | int page_count = obj->base.size >> PAGE_SHIFT; |
446 | int i; | 447 | int i; |
447 | 448 | ||
448 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | ||
449 | return; | ||
450 | |||
451 | if (obj->bit_17 == NULL) | 449 | if (obj->bit_17 == NULL) |
452 | return; | 450 | return; |
453 | 451 | ||
@@ -464,14 +462,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj) | |||
464 | void | 462 | void |
465 | i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) | 463 | i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj) |
466 | { | 464 | { |
467 | struct drm_device *dev = obj->base.dev; | ||
468 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
469 | int page_count = obj->base.size >> PAGE_SHIFT; | 465 | int page_count = obj->base.size >> PAGE_SHIFT; |
470 | int i; | 466 | int i; |
471 | 467 | ||
472 | if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17) | ||
473 | return; | ||
474 | |||
475 | if (obj->bit_17 == NULL) { | 468 | if (obj->bit_17 == NULL) { |
476 | obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * | 469 | obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) * |
477 | sizeof(long), GFP_KERNEL); | 470 | sizeof(long), GFP_KERNEL); |
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 012732b6ec25..9ee2729fe5c6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c | |||
@@ -383,6 +383,7 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
383 | pm_iir = dev_priv->pm_iir; | 383 | pm_iir = dev_priv->pm_iir; |
384 | dev_priv->pm_iir = 0; | 384 | dev_priv->pm_iir = 0; |
385 | pm_imr = I915_READ(GEN6_PMIMR); | 385 | pm_imr = I915_READ(GEN6_PMIMR); |
386 | I915_WRITE(GEN6_PMIMR, 0); | ||
386 | spin_unlock_irq(&dev_priv->rps_lock); | 387 | spin_unlock_irq(&dev_priv->rps_lock); |
387 | 388 | ||
388 | if (!pm_iir) | 389 | if (!pm_iir) |
@@ -420,7 +421,6 @@ static void gen6_pm_rps_work(struct work_struct *work) | |||
420 | * an *extremely* unlikely race with gen6_rps_enable() that is prevented | 421 | * an *extremely* unlikely race with gen6_rps_enable() that is prevented |
421 | * by holding struct_mutex for the duration of the write. | 422 | * by holding struct_mutex for the duration of the write. |
422 | */ | 423 | */ |
423 | I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir); | ||
424 | mutex_unlock(&dev_priv->dev->struct_mutex); | 424 | mutex_unlock(&dev_priv->dev->struct_mutex); |
425 | } | 425 | } |
426 | 426 | ||
@@ -536,8 +536,9 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) | |||
536 | unsigned long flags; | 536 | unsigned long flags; |
537 | spin_lock_irqsave(&dev_priv->rps_lock, flags); | 537 | spin_lock_irqsave(&dev_priv->rps_lock, flags); |
538 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); | 538 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); |
539 | I915_WRITE(GEN6_PMIMR, pm_iir); | ||
540 | dev_priv->pm_iir |= pm_iir; | 539 | dev_priv->pm_iir |= pm_iir; |
540 | I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); | ||
541 | POSTING_READ(GEN6_PMIMR); | ||
541 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); | 542 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); |
542 | queue_work(dev_priv->wq, &dev_priv->rps_work); | 543 | queue_work(dev_priv->wq, &dev_priv->rps_work); |
543 | } | 544 | } |
@@ -649,8 +650,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS) | |||
649 | unsigned long flags; | 650 | unsigned long flags; |
650 | spin_lock_irqsave(&dev_priv->rps_lock, flags); | 651 | spin_lock_irqsave(&dev_priv->rps_lock, flags); |
651 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); | 652 | WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n"); |
652 | I915_WRITE(GEN6_PMIMR, pm_iir); | ||
653 | dev_priv->pm_iir |= pm_iir; | 653 | dev_priv->pm_iir |= pm_iir; |
654 | I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir); | ||
655 | POSTING_READ(GEN6_PMIMR); | ||
654 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); | 656 | spin_unlock_irqrestore(&dev_priv->rps_lock, flags); |
655 | queue_work(dev_priv->wq, &dev_priv->rps_work); | 657 | queue_work(dev_priv->wq, &dev_priv->rps_work); |
656 | } | 658 | } |
@@ -1777,6 +1779,26 @@ static void ironlake_irq_preinstall(struct drm_device *dev) | |||
1777 | POSTING_READ(SDEIER); | 1779 | POSTING_READ(SDEIER); |
1778 | } | 1780 | } |
1779 | 1781 | ||
1782 | /* | ||
1783 | * Enable digital hotplug on the PCH, and configure the DP short pulse | ||
1784 | * duration to 2ms (which is the minimum in the Display Port spec) | ||
1785 | * | ||
1786 | * This register is the same on all known PCH chips. | ||
1787 | */ | ||
1788 | |||
1789 | static void ironlake_enable_pch_hotplug(struct drm_device *dev) | ||
1790 | { | ||
1791 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
1792 | u32 hotplug; | ||
1793 | |||
1794 | hotplug = I915_READ(PCH_PORT_HOTPLUG); | ||
1795 | hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK); | ||
1796 | hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms; | ||
1797 | hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms; | ||
1798 | hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms; | ||
1799 | I915_WRITE(PCH_PORT_HOTPLUG, hotplug); | ||
1800 | } | ||
1801 | |||
1780 | static int ironlake_irq_postinstall(struct drm_device *dev) | 1802 | static int ironlake_irq_postinstall(struct drm_device *dev) |
1781 | { | 1803 | { |
1782 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | 1804 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; |
@@ -1839,6 +1861,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev) | |||
1839 | I915_WRITE(SDEIER, hotplug_mask); | 1861 | I915_WRITE(SDEIER, hotplug_mask); |
1840 | POSTING_READ(SDEIER); | 1862 | POSTING_READ(SDEIER); |
1841 | 1863 | ||
1864 | ironlake_enable_pch_hotplug(dev); | ||
1865 | |||
1842 | if (IS_IRONLAKE_M(dev)) { | 1866 | if (IS_IRONLAKE_M(dev)) { |
1843 | /* Clear & enable PCU event interrupts */ | 1867 | /* Clear & enable PCU event interrupts */ |
1844 | I915_WRITE(DEIIR, DE_PCU_EVENT); | 1868 | I915_WRITE(DEIIR, DE_PCU_EVENT); |
@@ -1896,6 +1920,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev) | |||
1896 | I915_WRITE(SDEIER, hotplug_mask); | 1920 | I915_WRITE(SDEIER, hotplug_mask); |
1897 | POSTING_READ(SDEIER); | 1921 | POSTING_READ(SDEIER); |
1898 | 1922 | ||
1923 | ironlake_enable_pch_hotplug(dev); | ||
1924 | |||
1899 | return 0; | 1925 | return 0; |
1900 | } | 1926 | } |
1901 | 1927 | ||
@@ -2020,6 +2046,10 @@ static void ironlake_irq_uninstall(struct drm_device *dev) | |||
2020 | I915_WRITE(GTIMR, 0xffffffff); | 2046 | I915_WRITE(GTIMR, 0xffffffff); |
2021 | I915_WRITE(GTIER, 0x0); | 2047 | I915_WRITE(GTIER, 0x0); |
2022 | I915_WRITE(GTIIR, I915_READ(GTIIR)); | 2048 | I915_WRITE(GTIIR, I915_READ(GTIIR)); |
2049 | |||
2050 | I915_WRITE(SDEIMR, 0xffffffff); | ||
2051 | I915_WRITE(SDEIER, 0x0); | ||
2052 | I915_WRITE(SDEIIR, I915_READ(SDEIIR)); | ||
2023 | } | 2053 | } |
2024 | 2054 | ||
2025 | static void i915_driver_irq_uninstall(struct drm_device * dev) | 2055 | static void i915_driver_irq_uninstall(struct drm_device * dev) |
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 138eae15becd..5a09416e611f 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h | |||
@@ -242,16 +242,22 @@ | |||
242 | #define ASYNC_FLIP (1<<22) | 242 | #define ASYNC_FLIP (1<<22) |
243 | #define DISPLAY_PLANE_A (0<<20) | 243 | #define DISPLAY_PLANE_A (0<<20) |
244 | #define DISPLAY_PLANE_B (1<<20) | 244 | #define DISPLAY_PLANE_B (1<<20) |
245 | #define GFX_OP_PIPE_CONTROL ((0x3<<29)|(0x3<<27)|(0x2<<24)|2) | 245 | #define GFX_OP_PIPE_CONTROL(len) ((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2)) |
246 | #define PIPE_CONTROL_QW_WRITE (1<<14) | 246 | #define PIPE_CONTROL_CS_STALL (1<<20) |
247 | #define PIPE_CONTROL_DEPTH_STALL (1<<13) | 247 | #define PIPE_CONTROL_QW_WRITE (1<<14) |
248 | #define PIPE_CONTROL_WC_FLUSH (1<<12) | 248 | #define PIPE_CONTROL_DEPTH_STALL (1<<13) |
249 | #define PIPE_CONTROL_IS_FLUSH (1<<11) /* MBZ on Ironlake */ | 249 | #define PIPE_CONTROL_WRITE_FLUSH (1<<12) |
250 | #define PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */ | 250 | #define PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH (1<<12) /* gen6+ */ |
251 | #define PIPE_CONTROL_ISP_DIS (1<<9) | 251 | #define PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE (1<<11) /* MBZ on Ironlake */ |
252 | #define PIPE_CONTROL_NOTIFY (1<<8) | 252 | #define PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE (1<<10) /* GM45+ only */ |
253 | #define PIPE_CONTROL_INDIRECT_STATE_DISABLE (1<<9) | ||
254 | #define PIPE_CONTROL_NOTIFY (1<<8) | ||
255 | #define PIPE_CONTROL_VF_CACHE_INVALIDATE (1<<4) | ||
256 | #define PIPE_CONTROL_CONST_CACHE_INVALIDATE (1<<3) | ||
257 | #define PIPE_CONTROL_STATE_CACHE_INVALIDATE (1<<2) | ||
258 | #define PIPE_CONTROL_STALL_AT_SCOREBOARD (1<<1) | ||
259 | #define PIPE_CONTROL_DEPTH_CACHE_FLUSH (1<<0) | ||
253 | #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ | 260 | #define PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */ |
254 | #define PIPE_CONTROL_STALL_EN (1<<1) /* in addr word, Ironlake+ only */ | ||
255 | 261 | ||
256 | 262 | ||
257 | /* | 263 | /* |
@@ -2429,6 +2435,7 @@ | |||
2429 | #define WM0_PIPE_CURSOR_MASK (0x1f) | 2435 | #define WM0_PIPE_CURSOR_MASK (0x1f) |
2430 | 2436 | ||
2431 | #define WM0_PIPEB_ILK 0x45104 | 2437 | #define WM0_PIPEB_ILK 0x45104 |
2438 | #define WM0_PIPEC_IVB 0x45200 | ||
2432 | #define WM1_LP_ILK 0x45108 | 2439 | #define WM1_LP_ILK 0x45108 |
2433 | #define WM1_LP_SR_EN (1<<31) | 2440 | #define WM1_LP_SR_EN (1<<31) |
2434 | #define WM1_LP_LATENCY_SHIFT 24 | 2441 | #define WM1_LP_LATENCY_SHIFT 24 |
@@ -2567,10 +2574,18 @@ | |||
2567 | #define _CURBBASE 0x700c4 | 2574 | #define _CURBBASE 0x700c4 |
2568 | #define _CURBPOS 0x700c8 | 2575 | #define _CURBPOS 0x700c8 |
2569 | 2576 | ||
2577 | #define _CURBCNTR_IVB 0x71080 | ||
2578 | #define _CURBBASE_IVB 0x71084 | ||
2579 | #define _CURBPOS_IVB 0x71088 | ||
2580 | |||
2570 | #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) | 2581 | #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR) |
2571 | #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) | 2582 | #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE) |
2572 | #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) | 2583 | #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS) |
2573 | 2584 | ||
2585 | #define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB) | ||
2586 | #define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB) | ||
2587 | #define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB) | ||
2588 | |||
2574 | /* Display A control */ | 2589 | /* Display A control */ |
2575 | #define _DSPACNTR 0x70180 | 2590 | #define _DSPACNTR 0x70180 |
2576 | #define DISPLAY_PLANE_ENABLE (1<<31) | 2591 | #define DISPLAY_PLANE_ENABLE (1<<31) |
@@ -2916,12 +2931,13 @@ | |||
2916 | #define SDEIER 0xc400c | 2931 | #define SDEIER 0xc400c |
2917 | 2932 | ||
2918 | /* digital port hotplug */ | 2933 | /* digital port hotplug */ |
2919 | #define PCH_PORT_HOTPLUG 0xc4030 | 2934 | #define PCH_PORT_HOTPLUG 0xc4030 /* SHOTPLUG_CTL */ |
2920 | #define PORTD_HOTPLUG_ENABLE (1 << 20) | 2935 | #define PORTD_HOTPLUG_ENABLE (1 << 20) |
2921 | #define PORTD_PULSE_DURATION_2ms (0) | 2936 | #define PORTD_PULSE_DURATION_2ms (0) |
2922 | #define PORTD_PULSE_DURATION_4_5ms (1 << 18) | 2937 | #define PORTD_PULSE_DURATION_4_5ms (1 << 18) |
2923 | #define PORTD_PULSE_DURATION_6ms (2 << 18) | 2938 | #define PORTD_PULSE_DURATION_6ms (2 << 18) |
2924 | #define PORTD_PULSE_DURATION_100ms (3 << 18) | 2939 | #define PORTD_PULSE_DURATION_100ms (3 << 18) |
2940 | #define PORTD_PULSE_DURATION_MASK (3 << 18) | ||
2925 | #define PORTD_HOTPLUG_NO_DETECT (0) | 2941 | #define PORTD_HOTPLUG_NO_DETECT (0) |
2926 | #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) | 2942 | #define PORTD_HOTPLUG_SHORT_DETECT (1 << 16) |
2927 | #define PORTD_HOTPLUG_LONG_DETECT (1 << 17) | 2943 | #define PORTD_HOTPLUG_LONG_DETECT (1 << 17) |
@@ -2930,6 +2946,7 @@ | |||
2930 | #define PORTC_PULSE_DURATION_4_5ms (1 << 10) | 2946 | #define PORTC_PULSE_DURATION_4_5ms (1 << 10) |
2931 | #define PORTC_PULSE_DURATION_6ms (2 << 10) | 2947 | #define PORTC_PULSE_DURATION_6ms (2 << 10) |
2932 | #define PORTC_PULSE_DURATION_100ms (3 << 10) | 2948 | #define PORTC_PULSE_DURATION_100ms (3 << 10) |
2949 | #define PORTC_PULSE_DURATION_MASK (3 << 10) | ||
2933 | #define PORTC_HOTPLUG_NO_DETECT (0) | 2950 | #define PORTC_HOTPLUG_NO_DETECT (0) |
2934 | #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) | 2951 | #define PORTC_HOTPLUG_SHORT_DETECT (1 << 8) |
2935 | #define PORTC_HOTPLUG_LONG_DETECT (1 << 9) | 2952 | #define PORTC_HOTPLUG_LONG_DETECT (1 << 9) |
@@ -2938,6 +2955,7 @@ | |||
2938 | #define PORTB_PULSE_DURATION_4_5ms (1 << 2) | 2955 | #define PORTB_PULSE_DURATION_4_5ms (1 << 2) |
2939 | #define PORTB_PULSE_DURATION_6ms (2 << 2) | 2956 | #define PORTB_PULSE_DURATION_6ms (2 << 2) |
2940 | #define PORTB_PULSE_DURATION_100ms (3 << 2) | 2957 | #define PORTB_PULSE_DURATION_100ms (3 << 2) |
2958 | #define PORTB_PULSE_DURATION_MASK (3 << 2) | ||
2941 | #define PORTB_HOTPLUG_NO_DETECT (0) | 2959 | #define PORTB_HOTPLUG_NO_DETECT (0) |
2942 | #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) | 2960 | #define PORTB_HOTPLUG_SHORT_DETECT (1 << 0) |
2943 | #define PORTB_HOTPLUG_LONG_DETECT (1 << 1) | 2961 | #define PORTB_HOTPLUG_LONG_DETECT (1 << 1) |
@@ -2958,15 +2976,15 @@ | |||
2958 | 2976 | ||
2959 | #define _PCH_DPLL_A 0xc6014 | 2977 | #define _PCH_DPLL_A 0xc6014 |
2960 | #define _PCH_DPLL_B 0xc6018 | 2978 | #define _PCH_DPLL_B 0xc6018 |
2961 | #define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B) | 2979 | #define PCH_DPLL(pipe) (pipe == 0 ? _PCH_DPLL_A : _PCH_DPLL_B) |
2962 | 2980 | ||
2963 | #define _PCH_FPA0 0xc6040 | 2981 | #define _PCH_FPA0 0xc6040 |
2964 | #define FP_CB_TUNE (0x3<<22) | 2982 | #define FP_CB_TUNE (0x3<<22) |
2965 | #define _PCH_FPA1 0xc6044 | 2983 | #define _PCH_FPA1 0xc6044 |
2966 | #define _PCH_FPB0 0xc6048 | 2984 | #define _PCH_FPB0 0xc6048 |
2967 | #define _PCH_FPB1 0xc604c | 2985 | #define _PCH_FPB1 0xc604c |
2968 | #define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0) | 2986 | #define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0) |
2969 | #define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1) | 2987 | #define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1) |
2970 | 2988 | ||
2971 | #define PCH_DPLL_TEST 0xc606c | 2989 | #define PCH_DPLL_TEST 0xc606c |
2972 | 2990 | ||
@@ -3180,6 +3198,7 @@ | |||
3180 | #define FDI_LINK_TRAIN_NONE_IVB (3<<8) | 3198 | #define FDI_LINK_TRAIN_NONE_IVB (3<<8) |
3181 | 3199 | ||
3182 | /* both Tx and Rx */ | 3200 | /* both Tx and Rx */ |
3201 | #define FDI_COMPOSITE_SYNC (1<<11) | ||
3183 | #define FDI_LINK_TRAIN_AUTO (1<<10) | 3202 | #define FDI_LINK_TRAIN_AUTO (1<<10) |
3184 | #define FDI_SCRAMBLING_ENABLE (0<<7) | 3203 | #define FDI_SCRAMBLING_ENABLE (0<<7) |
3185 | #define FDI_SCRAMBLING_DISABLE (1<<7) | 3204 | #define FDI_SCRAMBLING_DISABLE (1<<7) |
@@ -3321,15 +3340,35 @@ | |||
3321 | #define PCH_PP_STATUS 0xc7200 | 3340 | #define PCH_PP_STATUS 0xc7200 |
3322 | #define PCH_PP_CONTROL 0xc7204 | 3341 | #define PCH_PP_CONTROL 0xc7204 |
3323 | #define PANEL_UNLOCK_REGS (0xabcd << 16) | 3342 | #define PANEL_UNLOCK_REGS (0xabcd << 16) |
3343 | #define PANEL_UNLOCK_MASK (0xffff << 16) | ||
3324 | #define EDP_FORCE_VDD (1 << 3) | 3344 | #define EDP_FORCE_VDD (1 << 3) |
3325 | #define EDP_BLC_ENABLE (1 << 2) | 3345 | #define EDP_BLC_ENABLE (1 << 2) |
3326 | #define PANEL_POWER_RESET (1 << 1) | 3346 | #define PANEL_POWER_RESET (1 << 1) |
3327 | #define PANEL_POWER_OFF (0 << 0) | 3347 | #define PANEL_POWER_OFF (0 << 0) |
3328 | #define PANEL_POWER_ON (1 << 0) | 3348 | #define PANEL_POWER_ON (1 << 0) |
3329 | #define PCH_PP_ON_DELAYS 0xc7208 | 3349 | #define PCH_PP_ON_DELAYS 0xc7208 |
3350 | #define PANEL_PORT_SELECT_MASK (3 << 30) | ||
3351 | #define PANEL_PORT_SELECT_LVDS (0 << 30) | ||
3352 | #define PANEL_PORT_SELECT_DPA (1 << 30) | ||
3330 | #define EDP_PANEL (1 << 30) | 3353 | #define EDP_PANEL (1 << 30) |
3354 | #define PANEL_PORT_SELECT_DPC (2 << 30) | ||
3355 | #define PANEL_PORT_SELECT_DPD (3 << 30) | ||
3356 | #define PANEL_POWER_UP_DELAY_MASK (0x1fff0000) | ||
3357 | #define PANEL_POWER_UP_DELAY_SHIFT 16 | ||
3358 | #define PANEL_LIGHT_ON_DELAY_MASK (0x1fff) | ||
3359 | #define PANEL_LIGHT_ON_DELAY_SHIFT 0 | ||
3360 | |||
3331 | #define PCH_PP_OFF_DELAYS 0xc720c | 3361 | #define PCH_PP_OFF_DELAYS 0xc720c |
3362 | #define PANEL_POWER_DOWN_DELAY_MASK (0x1fff0000) | ||
3363 | #define PANEL_POWER_DOWN_DELAY_SHIFT 16 | ||
3364 | #define PANEL_LIGHT_OFF_DELAY_MASK (0x1fff) | ||
3365 | #define PANEL_LIGHT_OFF_DELAY_SHIFT 0 | ||
3366 | |||
3332 | #define PCH_PP_DIVISOR 0xc7210 | 3367 | #define PCH_PP_DIVISOR 0xc7210 |
3368 | #define PP_REFERENCE_DIVIDER_MASK (0xffffff00) | ||
3369 | #define PP_REFERENCE_DIVIDER_SHIFT 8 | ||
3370 | #define PANEL_POWER_CYCLE_DELAY_MASK (0x1f) | ||
3371 | #define PANEL_POWER_CYCLE_DELAY_SHIFT 0 | ||
3333 | 3372 | ||
3334 | #define PCH_DP_B 0xe4100 | 3373 | #define PCH_DP_B 0xe4100 |
3335 | #define PCH_DPB_AUX_CH_CTL 0xe4110 | 3374 | #define PCH_DPB_AUX_CH_CTL 0xe4110 |
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c index 33378da63fdb..63880e2e5cfd 100644 --- a/drivers/gpu/drm/i915/intel_bios.c +++ b/drivers/gpu/drm/i915/intel_bios.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright � 2006 Intel Corporation | 2 | * Copyright �� 2006 Intel Corporation |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
@@ -309,6 +309,13 @@ parse_general_features(struct drm_i915_private *dev_priv, | |||
309 | dev_priv->lvds_use_ssc = general->enable_ssc; | 309 | dev_priv->lvds_use_ssc = general->enable_ssc; |
310 | dev_priv->lvds_ssc_freq = | 310 | dev_priv->lvds_ssc_freq = |
311 | intel_bios_ssc_frequency(dev, general->ssc_freq); | 311 | intel_bios_ssc_frequency(dev, general->ssc_freq); |
312 | dev_priv->display_clock_mode = general->display_clock_mode; | ||
313 | DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n", | ||
314 | dev_priv->int_tv_support, | ||
315 | dev_priv->int_crt_support, | ||
316 | dev_priv->lvds_use_ssc, | ||
317 | dev_priv->lvds_ssc_freq, | ||
318 | dev_priv->display_clock_mode); | ||
312 | } | 319 | } |
313 | } | 320 | } |
314 | 321 | ||
@@ -396,15 +403,13 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, | |||
396 | p_mapping->dvo_wiring = p_child->dvo_wiring; | 403 | p_mapping->dvo_wiring = p_child->dvo_wiring; |
397 | p_mapping->ddc_pin = p_child->ddc_pin; | 404 | p_mapping->ddc_pin = p_child->ddc_pin; |
398 | p_mapping->i2c_pin = p_child->i2c_pin; | 405 | p_mapping->i2c_pin = p_child->i2c_pin; |
399 | p_mapping->i2c_speed = p_child->i2c_speed; | ||
400 | p_mapping->initialized = 1; | 406 | p_mapping->initialized = 1; |
401 | DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n", | 407 | DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n", |
402 | p_mapping->dvo_port, | 408 | p_mapping->dvo_port, |
403 | p_mapping->slave_addr, | 409 | p_mapping->slave_addr, |
404 | p_mapping->dvo_wiring, | 410 | p_mapping->dvo_wiring, |
405 | p_mapping->ddc_pin, | 411 | p_mapping->ddc_pin, |
406 | p_mapping->i2c_pin, | 412 | p_mapping->i2c_pin); |
407 | p_mapping->i2c_speed); | ||
408 | } else { | 413 | } else { |
409 | DRM_DEBUG_KMS("Maybe one SDVO port is shared by " | 414 | DRM_DEBUG_KMS("Maybe one SDVO port is shared by " |
410 | "two SDVO device.\n"); | 415 | "two SDVO device.\n"); |
@@ -610,7 +615,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv) | |||
610 | /* Default to using SSC */ | 615 | /* Default to using SSC */ |
611 | dev_priv->lvds_use_ssc = 1; | 616 | dev_priv->lvds_use_ssc = 1; |
612 | dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); | 617 | dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1); |
613 | DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); | 618 | DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq); |
614 | 619 | ||
615 | /* eDP data */ | 620 | /* eDP data */ |
616 | dev_priv->edp.bpp = 18; | 621 | dev_priv->edp.bpp = 18; |
@@ -639,7 +644,7 @@ intel_parse_bios(struct drm_device *dev) | |||
639 | if (dev_priv->opregion.vbt) { | 644 | if (dev_priv->opregion.vbt) { |
640 | struct vbt_header *vbt = dev_priv->opregion.vbt; | 645 | struct vbt_header *vbt = dev_priv->opregion.vbt; |
641 | if (memcmp(vbt->signature, "$VBT", 4) == 0) { | 646 | if (memcmp(vbt->signature, "$VBT", 4) == 0) { |
642 | DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n", | 647 | DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n", |
643 | vbt->signature); | 648 | vbt->signature); |
644 | bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); | 649 | bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset); |
645 | } else | 650 | } else |
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h index c2e38feb7899..8af3735e27c6 100644 --- a/drivers/gpu/drm/i915/intel_bios.h +++ b/drivers/gpu/drm/i915/intel_bios.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright � 2006 Intel Corporation | 2 | * Copyright �� 2006 Intel Corporation |
3 | * | 3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | 4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), | 5 | * copy of this software and associated documentation files (the "Software"), |
@@ -120,7 +120,9 @@ struct bdb_general_features { | |||
120 | u8 ssc_freq:1; | 120 | u8 ssc_freq:1; |
121 | u8 enable_lfp_on_override:1; | 121 | u8 enable_lfp_on_override:1; |
122 | u8 disable_ssc_ddt:1; | 122 | u8 disable_ssc_ddt:1; |
123 | u8 rsvd8:3; /* finish byte */ | 123 | u8 rsvd7:1; |
124 | u8 display_clock_mode:1; | ||
125 | u8 rsvd8:1; /* finish byte */ | ||
124 | 126 | ||
125 | /* bits 3 */ | 127 | /* bits 3 */ |
126 | u8 disable_smooth_vision:1; | 128 | u8 disable_smooth_vision:1; |
@@ -133,7 +135,10 @@ struct bdb_general_features { | |||
133 | /* bits 5 */ | 135 | /* bits 5 */ |
134 | u8 int_crt_support:1; | 136 | u8 int_crt_support:1; |
135 | u8 int_tv_support:1; | 137 | u8 int_tv_support:1; |
136 | u8 rsvd11:6; /* finish byte */ | 138 | u8 int_efp_support:1; |
139 | u8 dp_ssc_enb:1; /* PCH attached eDP supports SSC */ | ||
140 | u8 dp_ssc_freq:1; /* SSC freq for PCH attached eDP */ | ||
141 | u8 rsvd11:3; /* finish byte */ | ||
137 | } __attribute__((packed)); | 142 | } __attribute__((packed)); |
138 | 143 | ||
139 | /* pre-915 */ | 144 | /* pre-915 */ |
@@ -197,8 +202,7 @@ struct bdb_general_features { | |||
197 | struct child_device_config { | 202 | struct child_device_config { |
198 | u16 handle; | 203 | u16 handle; |
199 | u16 device_type; | 204 | u16 device_type; |
200 | u8 i2c_speed; | 205 | u8 device_id[10]; /* ascii string */ |
201 | u8 rsvd[9]; | ||
202 | u16 addin_offset; | 206 | u16 addin_offset; |
203 | u8 dvo_port; /* See Device_PORT_* above */ | 207 | u8 dvo_port; /* See Device_PORT_* above */ |
204 | u8 i2c_pin; | 208 | u8 i2c_pin; |
@@ -446,11 +450,11 @@ struct bdb_driver_features { | |||
446 | #define EDP_VSWING_1_2V 3 | 450 | #define EDP_VSWING_1_2V 3 |
447 | 451 | ||
448 | struct edp_power_seq { | 452 | struct edp_power_seq { |
449 | u16 t3; | 453 | u16 t1_t3; |
450 | u16 t7; | 454 | u16 t8; |
451 | u16 t9; | 455 | u16 t9; |
452 | u16 t10; | 456 | u16 t10; |
453 | u16 t12; | 457 | u16 t11_t12; |
454 | } __attribute__ ((packed)); | 458 | } __attribute__ ((packed)); |
455 | 459 | ||
456 | struct edp_link_params { | 460 | struct edp_link_params { |
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c index 451534cb44de..fee0ad02c6d0 100644 --- a/drivers/gpu/drm/i915/intel_crt.c +++ b/drivers/gpu/drm/i915/intel_crt.c | |||
@@ -152,17 +152,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder, | |||
152 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 152 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
153 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; | 153 | adpa |= ADPA_VSYNC_ACTIVE_HIGH; |
154 | 154 | ||
155 | if (intel_crtc->pipe == 0) { | 155 | /* For CPT allow 3 pipe config, for others just use A or B */ |
156 | if (HAS_PCH_CPT(dev)) | 156 | if (HAS_PCH_CPT(dev)) |
157 | adpa |= PORT_TRANS_A_SEL_CPT; | 157 | adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); |
158 | else | 158 | else if (intel_crtc->pipe == 0) |
159 | adpa |= ADPA_PIPE_A_SELECT; | 159 | adpa |= ADPA_PIPE_A_SELECT; |
160 | } else { | 160 | else |
161 | if (HAS_PCH_CPT(dev)) | 161 | adpa |= ADPA_PIPE_B_SELECT; |
162 | adpa |= PORT_TRANS_B_SEL_CPT; | ||
163 | else | ||
164 | adpa |= ADPA_PIPE_B_SELECT; | ||
165 | } | ||
166 | 162 | ||
167 | if (!HAS_PCH_SPLIT(dev)) | 163 | if (!HAS_PCH_SPLIT(dev)) |
168 | I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); | 164 | I915_WRITE(BCLRPAT(intel_crtc->pipe), 0); |
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 8230cf54cc8d..981b1f1c04d8 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -803,6 +803,19 @@ static void assert_pch_pll(struct drm_i915_private *dev_priv, | |||
803 | u32 val; | 803 | u32 val; |
804 | bool cur_state; | 804 | bool cur_state; |
805 | 805 | ||
806 | if (HAS_PCH_CPT(dev_priv->dev)) { | ||
807 | u32 pch_dpll; | ||
808 | |||
809 | pch_dpll = I915_READ(PCH_DPLL_SEL); | ||
810 | |||
811 | /* Make sure the selected PLL is enabled to the transcoder */ | ||
812 | WARN(!((pch_dpll >> (4 * pipe)) & 8), | ||
813 | "transcoder %d PLL not enabled\n", pipe); | ||
814 | |||
815 | /* Convert the transcoder pipe number to a pll pipe number */ | ||
816 | pipe = (pch_dpll >> (4 * pipe)) & 1; | ||
817 | } | ||
818 | |||
806 | reg = PCH_DPLL(pipe); | 819 | reg = PCH_DPLL(pipe); |
807 | val = I915_READ(reg); | 820 | val = I915_READ(reg); |
808 | cur_state = !!(val & DPLL_VCO_ENABLE); | 821 | cur_state = !!(val & DPLL_VCO_ENABLE); |
@@ -1172,6 +1185,9 @@ static void intel_enable_pch_pll(struct drm_i915_private *dev_priv, | |||
1172 | int reg; | 1185 | int reg; |
1173 | u32 val; | 1186 | u32 val; |
1174 | 1187 | ||
1188 | if (pipe > 1) | ||
1189 | return; | ||
1190 | |||
1175 | /* PCH only available on ILK+ */ | 1191 | /* PCH only available on ILK+ */ |
1176 | BUG_ON(dev_priv->info->gen < 5); | 1192 | BUG_ON(dev_priv->info->gen < 5); |
1177 | 1193 | ||
@@ -1192,6 +1208,9 @@ static void intel_disable_pch_pll(struct drm_i915_private *dev_priv, | |||
1192 | int reg; | 1208 | int reg; |
1193 | u32 val; | 1209 | u32 val; |
1194 | 1210 | ||
1211 | if (pipe > 1) | ||
1212 | return; | ||
1213 | |||
1195 | /* PCH only available on ILK+ */ | 1214 | /* PCH only available on ILK+ */ |
1196 | BUG_ON(dev_priv->info->gen < 5); | 1215 | BUG_ON(dev_priv->info->gen < 5); |
1197 | 1216 | ||
@@ -1257,7 +1276,7 @@ static void intel_disable_transcoder(struct drm_i915_private *dev_priv, | |||
1257 | I915_WRITE(reg, val); | 1276 | I915_WRITE(reg, val); |
1258 | /* wait for PCH transcoder off, transcoder state */ | 1277 | /* wait for PCH transcoder off, transcoder state */ |
1259 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) | 1278 | if (wait_for((I915_READ(reg) & TRANS_STATE_ENABLE) == 0, 50)) |
1260 | DRM_ERROR("failed to disable transcoder\n"); | 1279 | DRM_ERROR("failed to disable transcoder %d\n", pipe); |
1261 | } | 1280 | } |
1262 | 1281 | ||
1263 | /** | 1282 | /** |
@@ -2086,6 +2105,7 @@ static int ironlake_update_plane(struct drm_crtc *crtc, | |||
2086 | switch (plane) { | 2105 | switch (plane) { |
2087 | case 0: | 2106 | case 0: |
2088 | case 1: | 2107 | case 1: |
2108 | case 2: | ||
2089 | break; | 2109 | break; |
2090 | default: | 2110 | default: |
2091 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); | 2111 | DRM_ERROR("Can't update plane %d in SAREA\n", plane); |
@@ -2185,6 +2205,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
2185 | case 0: | 2205 | case 0: |
2186 | case 1: | 2206 | case 1: |
2187 | break; | 2207 | break; |
2208 | case 2: | ||
2209 | if (IS_IVYBRIDGE(dev)) | ||
2210 | break; | ||
2211 | /* fall through otherwise */ | ||
2188 | default: | 2212 | default: |
2189 | DRM_ERROR("no plane for crtc\n"); | 2213 | DRM_ERROR("no plane for crtc\n"); |
2190 | return -EINVAL; | 2214 | return -EINVAL; |
@@ -2601,6 +2625,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2601 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; | 2625 | temp |= FDI_LINK_TRAIN_PATTERN_1_IVB; |
2602 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; | 2626 | temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK; |
2603 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; | 2627 | temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B; |
2628 | temp |= FDI_COMPOSITE_SYNC; | ||
2604 | I915_WRITE(reg, temp | FDI_TX_ENABLE); | 2629 | I915_WRITE(reg, temp | FDI_TX_ENABLE); |
2605 | 2630 | ||
2606 | reg = FDI_RX_CTL(pipe); | 2631 | reg = FDI_RX_CTL(pipe); |
@@ -2608,6 +2633,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc) | |||
2608 | temp &= ~FDI_LINK_TRAIN_AUTO; | 2633 | temp &= ~FDI_LINK_TRAIN_AUTO; |
2609 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; | 2634 | temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT; |
2610 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; | 2635 | temp |= FDI_LINK_TRAIN_PATTERN_1_CPT; |
2636 | temp |= FDI_COMPOSITE_SYNC; | ||
2611 | I915_WRITE(reg, temp | FDI_RX_ENABLE); | 2637 | I915_WRITE(reg, temp | FDI_RX_ENABLE); |
2612 | 2638 | ||
2613 | POSTING_READ(reg); | 2639 | POSTING_READ(reg); |
@@ -2867,7 +2893,7 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
2867 | struct drm_i915_private *dev_priv = dev->dev_private; | 2893 | struct drm_i915_private *dev_priv = dev->dev_private; |
2868 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 2894 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
2869 | int pipe = intel_crtc->pipe; | 2895 | int pipe = intel_crtc->pipe; |
2870 | u32 reg, temp; | 2896 | u32 reg, temp, transc_sel; |
2871 | 2897 | ||
2872 | /* For PCH output, training FDI link */ | 2898 | /* For PCH output, training FDI link */ |
2873 | dev_priv->display.fdi_link_train(crtc); | 2899 | dev_priv->display.fdi_link_train(crtc); |
@@ -2875,12 +2901,21 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
2875 | intel_enable_pch_pll(dev_priv, pipe); | 2901 | intel_enable_pch_pll(dev_priv, pipe); |
2876 | 2902 | ||
2877 | if (HAS_PCH_CPT(dev)) { | 2903 | if (HAS_PCH_CPT(dev)) { |
2904 | transc_sel = intel_crtc->use_pll_a ? TRANSC_DPLLA_SEL : | ||
2905 | TRANSC_DPLLB_SEL; | ||
2906 | |||
2878 | /* Be sure PCH DPLL SEL is set */ | 2907 | /* Be sure PCH DPLL SEL is set */ |
2879 | temp = I915_READ(PCH_DPLL_SEL); | 2908 | temp = I915_READ(PCH_DPLL_SEL); |
2880 | if (pipe == 0 && (temp & TRANSA_DPLL_ENABLE) == 0) | 2909 | if (pipe == 0) { |
2910 | temp &= ~(TRANSA_DPLLB_SEL); | ||
2881 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | 2911 | temp |= (TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); |
2882 | else if (pipe == 1 && (temp & TRANSB_DPLL_ENABLE) == 0) | 2912 | } else if (pipe == 1) { |
2913 | temp &= ~(TRANSB_DPLLB_SEL); | ||
2883 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | 2914 | temp |= (TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
2915 | } else if (pipe == 2) { | ||
2916 | temp &= ~(TRANSC_DPLLB_SEL); | ||
2917 | temp |= (TRANSC_DPLL_ENABLE | transc_sel); | ||
2918 | } | ||
2884 | I915_WRITE(PCH_DPLL_SEL, temp); | 2919 | I915_WRITE(PCH_DPLL_SEL, temp); |
2885 | } | 2920 | } |
2886 | 2921 | ||
@@ -2936,6 +2971,24 @@ static void ironlake_pch_enable(struct drm_crtc *crtc) | |||
2936 | intel_enable_transcoder(dev_priv, pipe); | 2971 | intel_enable_transcoder(dev_priv, pipe); |
2937 | } | 2972 | } |
2938 | 2973 | ||
2974 | void intel_cpt_verify_modeset(struct drm_device *dev, int pipe) | ||
2975 | { | ||
2976 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
2977 | int dslreg = PIPEDSL(pipe), tc2reg = TRANS_CHICKEN2(pipe); | ||
2978 | u32 temp; | ||
2979 | |||
2980 | temp = I915_READ(dslreg); | ||
2981 | udelay(500); | ||
2982 | if (wait_for(I915_READ(dslreg) != temp, 5)) { | ||
2983 | /* Without this, mode sets may fail silently on FDI */ | ||
2984 | I915_WRITE(tc2reg, TRANS_AUTOTRAIN_GEN_STALL_DIS); | ||
2985 | udelay(250); | ||
2986 | I915_WRITE(tc2reg, 0); | ||
2987 | if (wait_for(I915_READ(dslreg) != temp, 5)) | ||
2988 | DRM_ERROR("mode set failed: pipe %d stuck\n", pipe); | ||
2989 | } | ||
2990 | } | ||
2991 | |||
2939 | static void ironlake_crtc_enable(struct drm_crtc *crtc) | 2992 | static void ironlake_crtc_enable(struct drm_crtc *crtc) |
2940 | { | 2993 | { |
2941 | struct drm_device *dev = crtc->dev; | 2994 | struct drm_device *dev = crtc->dev; |
@@ -3046,13 +3099,13 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
3046 | temp = I915_READ(PCH_DPLL_SEL); | 3099 | temp = I915_READ(PCH_DPLL_SEL); |
3047 | switch (pipe) { | 3100 | switch (pipe) { |
3048 | case 0: | 3101 | case 0: |
3049 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL); | 3102 | temp &= ~(TRANSA_DPLL_ENABLE | TRANSA_DPLLB_SEL); |
3050 | break; | 3103 | break; |
3051 | case 1: | 3104 | case 1: |
3052 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); | 3105 | temp &= ~(TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL); |
3053 | break; | 3106 | break; |
3054 | case 2: | 3107 | case 2: |
3055 | /* FIXME: manage transcoder PLLs? */ | 3108 | /* C shares PLL A or B */ |
3056 | temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); | 3109 | temp &= ~(TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL); |
3057 | break; | 3110 | break; |
3058 | default: | 3111 | default: |
@@ -3062,7 +3115,8 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc) | |||
3062 | } | 3115 | } |
3063 | 3116 | ||
3064 | /* disable PCH DPLL */ | 3117 | /* disable PCH DPLL */ |
3065 | intel_disable_pch_pll(dev_priv, pipe); | 3118 | if (!intel_crtc->no_pll) |
3119 | intel_disable_pch_pll(dev_priv, pipe); | ||
3066 | 3120 | ||
3067 | /* Switch from PCDclk to Rawclk */ | 3121 | /* Switch from PCDclk to Rawclk */ |
3068 | reg = FDI_RX_CTL(pipe); | 3122 | reg = FDI_RX_CTL(pipe); |
@@ -3304,8 +3358,15 @@ void intel_encoder_prepare(struct drm_encoder *encoder) | |||
3304 | void intel_encoder_commit(struct drm_encoder *encoder) | 3358 | void intel_encoder_commit(struct drm_encoder *encoder) |
3305 | { | 3359 | { |
3306 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; | 3360 | struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private; |
3361 | struct drm_device *dev = encoder->dev; | ||
3362 | struct intel_encoder *intel_encoder = to_intel_encoder(encoder); | ||
3363 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_encoder->base.crtc); | ||
3364 | |||
3307 | /* lvds has its own version of commit see intel_lvds_commit */ | 3365 | /* lvds has its own version of commit see intel_lvds_commit */ |
3308 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); | 3366 | encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON); |
3367 | |||
3368 | if (HAS_PCH_CPT(dev)) | ||
3369 | intel_cpt_verify_modeset(dev, intel_crtc->pipe); | ||
3309 | } | 3370 | } |
3310 | 3371 | ||
3311 | void intel_encoder_destroy(struct drm_encoder *encoder) | 3372 | void intel_encoder_destroy(struct drm_encoder *encoder) |
@@ -4479,6 +4540,20 @@ static void sandybridge_update_wm(struct drm_device *dev) | |||
4479 | enabled |= 2; | 4540 | enabled |= 2; |
4480 | } | 4541 | } |
4481 | 4542 | ||
4543 | /* IVB has 3 pipes */ | ||
4544 | if (IS_IVYBRIDGE(dev) && | ||
4545 | g4x_compute_wm0(dev, 2, | ||
4546 | &sandybridge_display_wm_info, latency, | ||
4547 | &sandybridge_cursor_wm_info, latency, | ||
4548 | &plane_wm, &cursor_wm)) { | ||
4549 | I915_WRITE(WM0_PIPEC_IVB, | ||
4550 | (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm); | ||
4551 | DRM_DEBUG_KMS("FIFO watermarks For pipe C -" | ||
4552 | " plane %d, cursor: %d\n", | ||
4553 | plane_wm, cursor_wm); | ||
4554 | enabled |= 3; | ||
4555 | } | ||
4556 | |||
4482 | /* | 4557 | /* |
4483 | * Calculate and update the self-refresh watermark only when one | 4558 | * Calculate and update the self-refresh watermark only when one |
4484 | * display plane is used. | 4559 | * display plane is used. |
@@ -4585,7 +4660,9 @@ static void intel_update_watermarks(struct drm_device *dev) | |||
4585 | 4660 | ||
4586 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) | 4661 | static inline bool intel_panel_use_ssc(struct drm_i915_private *dev_priv) |
4587 | { | 4662 | { |
4588 | return dev_priv->lvds_use_ssc && i915_panel_use_ssc | 4663 | if (i915_panel_use_ssc >= 0) |
4664 | return i915_panel_use_ssc != 0; | ||
4665 | return dev_priv->lvds_use_ssc | ||
4589 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); | 4666 | && !(dev_priv->quirks & QUIRK_LVDS_SSC_DISABLE); |
4590 | } | 4667 | } |
4591 | 4668 | ||
@@ -5108,36 +5185,52 @@ static int i9xx_crtc_mode_set(struct drm_crtc *crtc, | |||
5108 | return ret; | 5185 | return ret; |
5109 | } | 5186 | } |
5110 | 5187 | ||
5111 | static void ironlake_update_pch_refclk(struct drm_device *dev) | 5188 | /* |
5189 | * Initialize reference clocks when the driver loads | ||
5190 | */ | ||
5191 | void ironlake_init_pch_refclk(struct drm_device *dev) | ||
5112 | { | 5192 | { |
5113 | struct drm_i915_private *dev_priv = dev->dev_private; | 5193 | struct drm_i915_private *dev_priv = dev->dev_private; |
5114 | struct drm_mode_config *mode_config = &dev->mode_config; | 5194 | struct drm_mode_config *mode_config = &dev->mode_config; |
5115 | struct drm_crtc *crtc; | ||
5116 | struct intel_encoder *encoder; | 5195 | struct intel_encoder *encoder; |
5117 | struct intel_encoder *has_edp_encoder = NULL; | ||
5118 | u32 temp; | 5196 | u32 temp; |
5119 | bool has_lvds = false; | 5197 | bool has_lvds = false; |
5198 | bool has_cpu_edp = false; | ||
5199 | bool has_pch_edp = false; | ||
5200 | bool has_panel = false; | ||
5201 | bool has_ck505 = false; | ||
5202 | bool can_ssc = false; | ||
5120 | 5203 | ||
5121 | /* We need to take the global config into account */ | 5204 | /* We need to take the global config into account */ |
5122 | list_for_each_entry(crtc, &mode_config->crtc_list, head) { | 5205 | list_for_each_entry(encoder, &mode_config->encoder_list, |
5123 | if (!crtc->enabled) | 5206 | base.head) { |
5124 | continue; | 5207 | switch (encoder->type) { |
5125 | 5208 | case INTEL_OUTPUT_LVDS: | |
5126 | list_for_each_entry(encoder, &mode_config->encoder_list, | 5209 | has_panel = true; |
5127 | base.head) { | 5210 | has_lvds = true; |
5128 | if (encoder->base.crtc != crtc) | 5211 | break; |
5129 | continue; | 5212 | case INTEL_OUTPUT_EDP: |
5130 | 5213 | has_panel = true; | |
5131 | switch (encoder->type) { | 5214 | if (intel_encoder_is_pch_edp(&encoder->base)) |
5132 | case INTEL_OUTPUT_LVDS: | 5215 | has_pch_edp = true; |
5133 | has_lvds = true; | 5216 | else |
5134 | case INTEL_OUTPUT_EDP: | 5217 | has_cpu_edp = true; |
5135 | has_edp_encoder = encoder; | 5218 | break; |
5136 | break; | ||
5137 | } | ||
5138 | } | 5219 | } |
5139 | } | 5220 | } |
5140 | 5221 | ||
5222 | if (HAS_PCH_IBX(dev)) { | ||
5223 | has_ck505 = dev_priv->display_clock_mode; | ||
5224 | can_ssc = has_ck505; | ||
5225 | } else { | ||
5226 | has_ck505 = false; | ||
5227 | can_ssc = true; | ||
5228 | } | ||
5229 | |||
5230 | DRM_DEBUG_KMS("has_panel %d has_lvds %d has_pch_edp %d has_cpu_edp %d has_ck505 %d\n", | ||
5231 | has_panel, has_lvds, has_pch_edp, has_cpu_edp, | ||
5232 | has_ck505); | ||
5233 | |||
5141 | /* Ironlake: try to setup display ref clock before DPLL | 5234 | /* Ironlake: try to setup display ref clock before DPLL |
5142 | * enabling. This is only under driver's control after | 5235 | * enabling. This is only under driver's control after |
5143 | * PCH B stepping, previous chipset stepping should be | 5236 | * PCH B stepping, previous chipset stepping should be |
@@ -5146,41 +5239,100 @@ static void ironlake_update_pch_refclk(struct drm_device *dev) | |||
5146 | temp = I915_READ(PCH_DREF_CONTROL); | 5239 | temp = I915_READ(PCH_DREF_CONTROL); |
5147 | /* Always enable nonspread source */ | 5240 | /* Always enable nonspread source */ |
5148 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; | 5241 | temp &= ~DREF_NONSPREAD_SOURCE_MASK; |
5149 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
5150 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
5151 | temp |= DREF_SSC_SOURCE_ENABLE; | ||
5152 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5153 | 5242 | ||
5154 | POSTING_READ(PCH_DREF_CONTROL); | 5243 | if (has_ck505) |
5155 | udelay(200); | 5244 | temp |= DREF_NONSPREAD_CK505_ENABLE; |
5245 | else | ||
5246 | temp |= DREF_NONSPREAD_SOURCE_ENABLE; | ||
5156 | 5247 | ||
5157 | if (has_edp_encoder) { | 5248 | if (has_panel) { |
5158 | if (intel_panel_use_ssc(dev_priv)) { | 5249 | temp &= ~DREF_SSC_SOURCE_MASK; |
5159 | temp |= DREF_SSC1_ENABLE; | 5250 | temp |= DREF_SSC_SOURCE_ENABLE; |
5160 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5161 | 5251 | ||
5162 | POSTING_READ(PCH_DREF_CONTROL); | 5252 | /* SSC must be turned on before enabling the CPU output */ |
5163 | udelay(200); | 5253 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { |
5254 | DRM_DEBUG_KMS("Using SSC on panel\n"); | ||
5255 | temp |= DREF_SSC1_ENABLE; | ||
5164 | } | 5256 | } |
5257 | |||
5258 | /* Get SSC going before enabling the outputs */ | ||
5259 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5260 | POSTING_READ(PCH_DREF_CONTROL); | ||
5261 | udelay(200); | ||
5262 | |||
5165 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | 5263 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; |
5166 | 5264 | ||
5167 | /* Enable CPU source on CPU attached eDP */ | 5265 | /* Enable CPU source on CPU attached eDP */ |
5168 | if (!intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 5266 | if (has_cpu_edp) { |
5169 | if (intel_panel_use_ssc(dev_priv)) | 5267 | if (intel_panel_use_ssc(dev_priv) && can_ssc) { |
5268 | DRM_DEBUG_KMS("Using SSC on eDP\n"); | ||
5170 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; | 5269 | temp |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD; |
5270 | } | ||
5171 | else | 5271 | else |
5172 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; | 5272 | temp |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD; |
5173 | } else { | 5273 | } else |
5174 | /* Enable SSC on PCH eDP if needed */ | 5274 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; |
5175 | if (intel_panel_use_ssc(dev_priv)) { | 5275 | |
5176 | DRM_ERROR("enabling SSC on PCH\n"); | ||
5177 | temp |= DREF_SUPERSPREAD_SOURCE_ENABLE; | ||
5178 | } | ||
5179 | } | ||
5180 | I915_WRITE(PCH_DREF_CONTROL, temp); | 5276 | I915_WRITE(PCH_DREF_CONTROL, temp); |
5181 | POSTING_READ(PCH_DREF_CONTROL); | 5277 | POSTING_READ(PCH_DREF_CONTROL); |
5182 | udelay(200); | 5278 | udelay(200); |
5279 | } else { | ||
5280 | DRM_DEBUG_KMS("Disabling SSC entirely\n"); | ||
5281 | |||
5282 | temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK; | ||
5283 | |||
5284 | /* Turn off CPU output */ | ||
5285 | temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE; | ||
5286 | |||
5287 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5288 | POSTING_READ(PCH_DREF_CONTROL); | ||
5289 | udelay(200); | ||
5290 | |||
5291 | /* Turn off the SSC source */ | ||
5292 | temp &= ~DREF_SSC_SOURCE_MASK; | ||
5293 | temp |= DREF_SSC_SOURCE_DISABLE; | ||
5294 | |||
5295 | /* Turn off SSC1 */ | ||
5296 | temp &= ~ DREF_SSC1_ENABLE; | ||
5297 | |||
5298 | I915_WRITE(PCH_DREF_CONTROL, temp); | ||
5299 | POSTING_READ(PCH_DREF_CONTROL); | ||
5300 | udelay(200); | ||
5301 | } | ||
5302 | } | ||
5303 | |||
5304 | static int ironlake_get_refclk(struct drm_crtc *crtc) | ||
5305 | { | ||
5306 | struct drm_device *dev = crtc->dev; | ||
5307 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5308 | struct intel_encoder *encoder; | ||
5309 | struct drm_mode_config *mode_config = &dev->mode_config; | ||
5310 | struct intel_encoder *edp_encoder = NULL; | ||
5311 | int num_connectors = 0; | ||
5312 | bool is_lvds = false; | ||
5313 | |||
5314 | list_for_each_entry(encoder, &mode_config->encoder_list, base.head) { | ||
5315 | if (encoder->base.crtc != crtc) | ||
5316 | continue; | ||
5317 | |||
5318 | switch (encoder->type) { | ||
5319 | case INTEL_OUTPUT_LVDS: | ||
5320 | is_lvds = true; | ||
5321 | break; | ||
5322 | case INTEL_OUTPUT_EDP: | ||
5323 | edp_encoder = encoder; | ||
5324 | break; | ||
5325 | } | ||
5326 | num_connectors++; | ||
5183 | } | 5327 | } |
5328 | |||
5329 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | ||
5330 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | ||
5331 | dev_priv->lvds_ssc_freq); | ||
5332 | return dev_priv->lvds_ssc_freq * 1000; | ||
5333 | } | ||
5334 | |||
5335 | return 120000; | ||
5184 | } | 5336 | } |
5185 | 5337 | ||
5186 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | 5338 | static int ironlake_crtc_mode_set(struct drm_crtc *crtc, |
@@ -5242,16 +5394,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5242 | num_connectors++; | 5394 | num_connectors++; |
5243 | } | 5395 | } |
5244 | 5396 | ||
5245 | if (is_lvds && intel_panel_use_ssc(dev_priv) && num_connectors < 2) { | 5397 | refclk = ironlake_get_refclk(crtc); |
5246 | refclk = dev_priv->lvds_ssc_freq * 1000; | ||
5247 | DRM_DEBUG_KMS("using SSC reference clock of %d MHz\n", | ||
5248 | refclk / 1000); | ||
5249 | } else { | ||
5250 | refclk = 96000; | ||
5251 | if (!has_edp_encoder || | ||
5252 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) | ||
5253 | refclk = 120000; /* 120Mhz refclk */ | ||
5254 | } | ||
5255 | 5398 | ||
5256 | /* | 5399 | /* |
5257 | * Returns a set of divisors for the desired target clock with the given | 5400 | * Returns a set of divisors for the desired target clock with the given |
@@ -5378,8 +5521,6 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5378 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, | 5521 | ironlake_compute_m_n(intel_crtc->bpp, lane, target_clock, link_bw, |
5379 | &m_n); | 5522 | &m_n); |
5380 | 5523 | ||
5381 | ironlake_update_pch_refclk(dev); | ||
5382 | |||
5383 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; | 5524 | fp = clock.n << 16 | clock.m1 << 8 | clock.m2; |
5384 | if (has_reduced_clock) | 5525 | if (has_reduced_clock) |
5385 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | | 5526 | fp2 = reduced_clock.n << 16 | reduced_clock.m1 << 8 | |
@@ -5451,39 +5592,32 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5451 | /* Set up the display plane register */ | 5592 | /* Set up the display plane register */ |
5452 | dspcntr = DISPPLANE_GAMMA_ENABLE; | 5593 | dspcntr = DISPPLANE_GAMMA_ENABLE; |
5453 | 5594 | ||
5454 | DRM_DEBUG_KMS("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); | 5595 | DRM_DEBUG_KMS("Mode for pipe %d:\n", pipe); |
5455 | drm_mode_debug_printmodeline(mode); | 5596 | drm_mode_debug_printmodeline(mode); |
5456 | 5597 | ||
5457 | /* PCH eDP needs FDI, but CPU eDP does not */ | 5598 | /* PCH eDP needs FDI, but CPU eDP does not */ |
5458 | if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 5599 | if (!intel_crtc->no_pll) { |
5459 | I915_WRITE(PCH_FP0(pipe), fp); | 5600 | if (!has_edp_encoder || |
5460 | I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); | 5601 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) { |
5461 | 5602 | I915_WRITE(PCH_FP0(pipe), fp); | |
5462 | POSTING_READ(PCH_DPLL(pipe)); | 5603 | I915_WRITE(PCH_DPLL(pipe), dpll & ~DPLL_VCO_ENABLE); |
5463 | udelay(150); | ||
5464 | } | ||
5465 | 5604 | ||
5466 | /* enable transcoder DPLL */ | 5605 | POSTING_READ(PCH_DPLL(pipe)); |
5467 | if (HAS_PCH_CPT(dev)) { | 5606 | udelay(150); |
5468 | temp = I915_READ(PCH_DPLL_SEL); | 5607 | } |
5469 | switch (pipe) { | 5608 | } else { |
5470 | case 0: | 5609 | if (dpll == (I915_READ(PCH_DPLL(0)) & 0x7fffffff) && |
5471 | temp |= TRANSA_DPLL_ENABLE | TRANSA_DPLLA_SEL; | 5610 | fp == I915_READ(PCH_FP0(0))) { |
5472 | break; | 5611 | intel_crtc->use_pll_a = true; |
5473 | case 1: | 5612 | DRM_DEBUG_KMS("using pipe a dpll\n"); |
5474 | temp |= TRANSB_DPLL_ENABLE | TRANSB_DPLLB_SEL; | 5613 | } else if (dpll == (I915_READ(PCH_DPLL(1)) & 0x7fffffff) && |
5475 | break; | 5614 | fp == I915_READ(PCH_FP0(1))) { |
5476 | case 2: | 5615 | intel_crtc->use_pll_a = false; |
5477 | /* FIXME: manage transcoder PLLs? */ | 5616 | DRM_DEBUG_KMS("using pipe b dpll\n"); |
5478 | temp |= TRANSC_DPLL_ENABLE | TRANSC_DPLLB_SEL; | 5617 | } else { |
5479 | break; | 5618 | DRM_DEBUG_KMS("no matching PLL configuration for pipe 2\n"); |
5480 | default: | 5619 | return -EINVAL; |
5481 | BUG(); | ||
5482 | } | 5620 | } |
5483 | I915_WRITE(PCH_DPLL_SEL, temp); | ||
5484 | |||
5485 | POSTING_READ(PCH_DPLL_SEL); | ||
5486 | udelay(150); | ||
5487 | } | 5621 | } |
5488 | 5622 | ||
5489 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. | 5623 | /* The LVDS pin pair needs to be on before the DPLLs are enabled. |
@@ -5493,17 +5627,13 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5493 | if (is_lvds) { | 5627 | if (is_lvds) { |
5494 | temp = I915_READ(PCH_LVDS); | 5628 | temp = I915_READ(PCH_LVDS); |
5495 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; | 5629 | temp |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP; |
5496 | if (pipe == 1) { | 5630 | if (HAS_PCH_CPT(dev)) |
5497 | if (HAS_PCH_CPT(dev)) | 5631 | temp |= PORT_TRANS_SEL_CPT(pipe); |
5498 | temp |= PORT_TRANS_B_SEL_CPT; | 5632 | else if (pipe == 1) |
5499 | else | 5633 | temp |= LVDS_PIPEB_SELECT; |
5500 | temp |= LVDS_PIPEB_SELECT; | 5634 | else |
5501 | } else { | 5635 | temp &= ~LVDS_PIPEB_SELECT; |
5502 | if (HAS_PCH_CPT(dev)) | 5636 | |
5503 | temp &= ~PORT_TRANS_SEL_MASK; | ||
5504 | else | ||
5505 | temp &= ~LVDS_PIPEB_SELECT; | ||
5506 | } | ||
5507 | /* set the corresponsding LVDS_BORDER bit */ | 5637 | /* set the corresponsding LVDS_BORDER bit */ |
5508 | temp |= dev_priv->lvds_border_bits; | 5638 | temp |= dev_priv->lvds_border_bits; |
5509 | /* Set the B0-B3 data pairs corresponding to whether we're going to | 5639 | /* Set the B0-B3 data pairs corresponding to whether we're going to |
@@ -5553,8 +5683,9 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5553 | I915_WRITE(TRANSDPLINK_N1(pipe), 0); | 5683 | I915_WRITE(TRANSDPLINK_N1(pipe), 0); |
5554 | } | 5684 | } |
5555 | 5685 | ||
5556 | if (!has_edp_encoder || | 5686 | if (!intel_crtc->no_pll && |
5557 | intel_encoder_is_pch_edp(&has_edp_encoder->base)) { | 5687 | (!has_edp_encoder || |
5688 | intel_encoder_is_pch_edp(&has_edp_encoder->base))) { | ||
5558 | I915_WRITE(PCH_DPLL(pipe), dpll); | 5689 | I915_WRITE(PCH_DPLL(pipe), dpll); |
5559 | 5690 | ||
5560 | /* Wait for the clocks to stabilize. */ | 5691 | /* Wait for the clocks to stabilize. */ |
@@ -5570,18 +5701,20 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc, | |||
5570 | } | 5701 | } |
5571 | 5702 | ||
5572 | intel_crtc->lowfreq_avail = false; | 5703 | intel_crtc->lowfreq_avail = false; |
5573 | if (is_lvds && has_reduced_clock && i915_powersave) { | 5704 | if (!intel_crtc->no_pll) { |
5574 | I915_WRITE(PCH_FP1(pipe), fp2); | 5705 | if (is_lvds && has_reduced_clock && i915_powersave) { |
5575 | intel_crtc->lowfreq_avail = true; | 5706 | I915_WRITE(PCH_FP1(pipe), fp2); |
5576 | if (HAS_PIPE_CXSR(dev)) { | 5707 | intel_crtc->lowfreq_avail = true; |
5577 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); | 5708 | if (HAS_PIPE_CXSR(dev)) { |
5578 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; | 5709 | DRM_DEBUG_KMS("enabling CxSR downclocking\n"); |
5579 | } | 5710 | pipeconf |= PIPECONF_CXSR_DOWNCLOCK; |
5580 | } else { | 5711 | } |
5581 | I915_WRITE(PCH_FP1(pipe), fp); | 5712 | } else { |
5582 | if (HAS_PIPE_CXSR(dev)) { | 5713 | I915_WRITE(PCH_FP1(pipe), fp); |
5583 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); | 5714 | if (HAS_PIPE_CXSR(dev)) { |
5584 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | 5715 | DRM_DEBUG_KMS("disabling CxSR downclocking\n"); |
5716 | pipeconf &= ~PIPECONF_CXSR_DOWNCLOCK; | ||
5717 | } | ||
5585 | } | 5718 | } |
5586 | } | 5719 | } |
5587 | 5720 | ||
@@ -5884,6 +6017,31 @@ static void i9xx_update_cursor(struct drm_crtc *crtc, u32 base) | |||
5884 | I915_WRITE(CURBASE(pipe), base); | 6017 | I915_WRITE(CURBASE(pipe), base); |
5885 | } | 6018 | } |
5886 | 6019 | ||
6020 | static void ivb_update_cursor(struct drm_crtc *crtc, u32 base) | ||
6021 | { | ||
6022 | struct drm_device *dev = crtc->dev; | ||
6023 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
6024 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | ||
6025 | int pipe = intel_crtc->pipe; | ||
6026 | bool visible = base != 0; | ||
6027 | |||
6028 | if (intel_crtc->cursor_visible != visible) { | ||
6029 | uint32_t cntl = I915_READ(CURCNTR_IVB(pipe)); | ||
6030 | if (base) { | ||
6031 | cntl &= ~CURSOR_MODE; | ||
6032 | cntl |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; | ||
6033 | } else { | ||
6034 | cntl &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE); | ||
6035 | cntl |= CURSOR_MODE_DISABLE; | ||
6036 | } | ||
6037 | I915_WRITE(CURCNTR_IVB(pipe), cntl); | ||
6038 | |||
6039 | intel_crtc->cursor_visible = visible; | ||
6040 | } | ||
6041 | /* and commit changes on next vblank */ | ||
6042 | I915_WRITE(CURBASE_IVB(pipe), base); | ||
6043 | } | ||
6044 | |||
5887 | /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ | 6045 | /* If no-part of the cursor is visible on the framebuffer, then the GPU may hang... */ |
5888 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, | 6046 | static void intel_crtc_update_cursor(struct drm_crtc *crtc, |
5889 | bool on) | 6047 | bool on) |
@@ -5931,11 +6089,16 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc, | |||
5931 | if (!visible && !intel_crtc->cursor_visible) | 6089 | if (!visible && !intel_crtc->cursor_visible) |
5932 | return; | 6090 | return; |
5933 | 6091 | ||
5934 | I915_WRITE(CURPOS(pipe), pos); | 6092 | if (IS_IVYBRIDGE(dev)) { |
5935 | if (IS_845G(dev) || IS_I865G(dev)) | 6093 | I915_WRITE(CURPOS_IVB(pipe), pos); |
5936 | i845_update_cursor(crtc, base); | 6094 | ivb_update_cursor(crtc, base); |
5937 | else | 6095 | } else { |
5938 | i9xx_update_cursor(crtc, base); | 6096 | I915_WRITE(CURPOS(pipe), pos); |
6097 | if (IS_845G(dev) || IS_I865G(dev)) | ||
6098 | i845_update_cursor(crtc, base); | ||
6099 | else | ||
6100 | i9xx_update_cursor(crtc, base); | ||
6101 | } | ||
5939 | 6102 | ||
5940 | if (visible) | 6103 | if (visible) |
5941 | intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); | 6104 | intel_mark_busy(dev, to_intel_framebuffer(crtc->fb)->obj); |
@@ -7197,6 +7360,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
7197 | intel_crtc->bpp = 24; /* default for pre-Ironlake */ | 7360 | intel_crtc->bpp = 24; /* default for pre-Ironlake */ |
7198 | 7361 | ||
7199 | if (HAS_PCH_SPLIT(dev)) { | 7362 | if (HAS_PCH_SPLIT(dev)) { |
7363 | if (pipe == 2 && IS_IVYBRIDGE(dev)) | ||
7364 | intel_crtc->no_pll = true; | ||
7200 | intel_helper_funcs.prepare = ironlake_crtc_prepare; | 7365 | intel_helper_funcs.prepare = ironlake_crtc_prepare; |
7201 | intel_helper_funcs.commit = ironlake_crtc_commit; | 7366 | intel_helper_funcs.commit = ironlake_crtc_commit; |
7202 | } else { | 7367 | } else { |
@@ -7376,6 +7541,9 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
7376 | 7541 | ||
7377 | /* disable all the possible outputs/crtcs before entering KMS mode */ | 7542 | /* disable all the possible outputs/crtcs before entering KMS mode */ |
7378 | drm_helper_disable_unused_functions(dev); | 7543 | drm_helper_disable_unused_functions(dev); |
7544 | |||
7545 | if (HAS_PCH_SPLIT(dev)) | ||
7546 | ironlake_init_pch_refclk(dev); | ||
7379 | } | 7547 | } |
7380 | 7548 | ||
7381 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) | 7549 | static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb) |
@@ -7620,6 +7788,10 @@ void gen6_disable_rps(struct drm_device *dev) | |||
7620 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); | 7788 | I915_WRITE(GEN6_RPNSWREQ, 1 << 31); |
7621 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); | 7789 | I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); |
7622 | I915_WRITE(GEN6_PMIER, 0); | 7790 | I915_WRITE(GEN6_PMIER, 0); |
7791 | /* Complete PM interrupt masking here doesn't race with the rps work | ||
7792 | * item again unmasking PM interrupts because that is using a different | ||
7793 | * register (PMIMR) to mask PM interrupts. The only risk is in leaving | ||
7794 | * stale bits in PMIIR and PMIMR which gen6_enable_rps will clean up. */ | ||
7623 | 7795 | ||
7624 | spin_lock_irq(&dev_priv->rps_lock); | 7796 | spin_lock_irq(&dev_priv->rps_lock); |
7625 | dev_priv->pm_iir = 0; | 7797 | dev_priv->pm_iir = 0; |
@@ -8617,6 +8789,7 @@ void intel_modeset_cleanup(struct drm_device *dev) | |||
8617 | * enqueue unpin/hotplug work. */ | 8789 | * enqueue unpin/hotplug work. */ |
8618 | drm_irq_uninstall(dev); | 8790 | drm_irq_uninstall(dev); |
8619 | cancel_work_sync(&dev_priv->hotplug_work); | 8791 | cancel_work_sync(&dev_priv->hotplug_work); |
8792 | cancel_work_sync(&dev_priv->rps_work); | ||
8620 | 8793 | ||
8621 | /* flush any delayed tasks or pending work */ | 8794 | /* flush any delayed tasks or pending work */ |
8622 | flush_scheduled_work(); | 8795 | flush_scheduled_work(); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index 6cbde9ff1ec6..fc1a0832af4f 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -36,7 +36,7 @@ | |||
36 | #include "i915_drv.h" | 36 | #include "i915_drv.h" |
37 | #include "drm_dp_helper.h" | 37 | #include "drm_dp_helper.h" |
38 | 38 | ||
39 | 39 | #define DP_RECEIVER_CAP_SIZE 0xf | |
40 | #define DP_LINK_STATUS_SIZE 6 | 40 | #define DP_LINK_STATUS_SIZE 6 |
41 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) | 41 | #define DP_LINK_CHECK_TIMEOUT (10 * 1000) |
42 | 42 | ||
@@ -53,12 +53,21 @@ struct intel_dp { | |||
53 | int dpms_mode; | 53 | int dpms_mode; |
54 | uint8_t link_bw; | 54 | uint8_t link_bw; |
55 | uint8_t lane_count; | 55 | uint8_t lane_count; |
56 | uint8_t dpcd[8]; | 56 | uint8_t dpcd[DP_RECEIVER_CAP_SIZE]; |
57 | struct i2c_adapter adapter; | 57 | struct i2c_adapter adapter; |
58 | struct i2c_algo_dp_aux_data algo; | 58 | struct i2c_algo_dp_aux_data algo; |
59 | bool is_pch_edp; | 59 | bool is_pch_edp; |
60 | uint8_t train_set[4]; | 60 | uint8_t train_set[4]; |
61 | uint8_t link_status[DP_LINK_STATUS_SIZE]; | 61 | uint8_t link_status[DP_LINK_STATUS_SIZE]; |
62 | int panel_power_up_delay; | ||
63 | int panel_power_down_delay; | ||
64 | int panel_power_cycle_delay; | ||
65 | int backlight_on_delay; | ||
66 | int backlight_off_delay; | ||
67 | struct drm_display_mode *panel_fixed_mode; /* for eDP */ | ||
68 | struct delayed_work panel_vdd_work; | ||
69 | bool want_panel_vdd; | ||
70 | unsigned long panel_off_jiffies; | ||
62 | }; | 71 | }; |
63 | 72 | ||
64 | /** | 73 | /** |
@@ -86,6 +95,17 @@ static bool is_pch_edp(struct intel_dp *intel_dp) | |||
86 | return intel_dp->is_pch_edp; | 95 | return intel_dp->is_pch_edp; |
87 | } | 96 | } |
88 | 97 | ||
98 | /** | ||
99 | * is_cpu_edp - is the port on the CPU and attached to an eDP panel? | ||
100 | * @intel_dp: DP struct | ||
101 | * | ||
102 | * Returns true if the given DP struct corresponds to a CPU eDP port. | ||
103 | */ | ||
104 | static bool is_cpu_edp(struct intel_dp *intel_dp) | ||
105 | { | ||
106 | return is_edp(intel_dp) && !is_pch_edp(intel_dp); | ||
107 | } | ||
108 | |||
89 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) | 109 | static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder) |
90 | { | 110 | { |
91 | return container_of(encoder, struct intel_dp, base.base); | 111 | return container_of(encoder, struct intel_dp, base.base); |
@@ -175,9 +195,25 @@ intel_dp_link_clock(uint8_t link_bw) | |||
175 | return 162000; | 195 | return 162000; |
176 | } | 196 | } |
177 | 197 | ||
178 | /* I think this is a fiction */ | 198 | /* |
199 | * The units on the numbers in the next two are... bizarre. Examples will | ||
200 | * make it clearer; this one parallels an example in the eDP spec. | ||
201 | * | ||
202 | * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as: | ||
203 | * | ||
204 | * 270000 * 1 * 8 / 10 == 216000 | ||
205 | * | ||
206 | * The actual data capacity of that configuration is 2.16Gbit/s, so the | ||
207 | * units are decakilobits. ->clock in a drm_display_mode is in kilohertz - | ||
208 | * or equivalently, kilopixels per second - so for 1680x1050R it'd be | ||
209 | * 119000. At 18bpp that's 2142000 kilobits per second. | ||
210 | * | ||
211 | * Thus the strange-looking division by 10 in intel_dp_link_required, to | ||
212 | * get the result in decakilobits instead of kilobits. | ||
213 | */ | ||
214 | |||
179 | static int | 215 | static int |
180 | intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pixel_clock) | 216 | intel_dp_link_required(struct intel_dp *intel_dp, int pixel_clock) |
181 | { | 217 | { |
182 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | 218 | struct drm_crtc *crtc = intel_dp->base.base.crtc; |
183 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 219 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
@@ -186,7 +222,7 @@ intel_dp_link_required(struct drm_device *dev, struct intel_dp *intel_dp, int pi | |||
186 | if (intel_crtc) | 222 | if (intel_crtc) |
187 | bpp = intel_crtc->bpp; | 223 | bpp = intel_crtc->bpp; |
188 | 224 | ||
189 | return (pixel_clock * bpp + 7) / 8; | 225 | return (pixel_clock * bpp + 9) / 10; |
190 | } | 226 | } |
191 | 227 | ||
192 | static int | 228 | static int |
@@ -200,24 +236,19 @@ intel_dp_mode_valid(struct drm_connector *connector, | |||
200 | struct drm_display_mode *mode) | 236 | struct drm_display_mode *mode) |
201 | { | 237 | { |
202 | struct intel_dp *intel_dp = intel_attached_dp(connector); | 238 | struct intel_dp *intel_dp = intel_attached_dp(connector); |
203 | struct drm_device *dev = connector->dev; | ||
204 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
205 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); | 239 | int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp)); |
206 | int max_lanes = intel_dp_max_lane_count(intel_dp); | 240 | int max_lanes = intel_dp_max_lane_count(intel_dp); |
207 | 241 | ||
208 | if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { | 242 | if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { |
209 | if (mode->hdisplay > dev_priv->panel_fixed_mode->hdisplay) | 243 | if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay) |
210 | return MODE_PANEL; | 244 | return MODE_PANEL; |
211 | 245 | ||
212 | if (mode->vdisplay > dev_priv->panel_fixed_mode->vdisplay) | 246 | if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay) |
213 | return MODE_PANEL; | 247 | return MODE_PANEL; |
214 | } | 248 | } |
215 | 249 | ||
216 | /* only refuse the mode on non eDP since we have seen some weird eDP panels | 250 | if (intel_dp_link_required(intel_dp, mode->clock) |
217 | which are outside spec tolerances but somehow work by magic */ | 251 | > intel_dp_max_data_rate(max_link_clock, max_lanes)) |
218 | if (!is_edp(intel_dp) && | ||
219 | (intel_dp_link_required(connector->dev, intel_dp, mode->clock) | ||
220 | > intel_dp_max_data_rate(max_link_clock, max_lanes))) | ||
221 | return MODE_CLOCK_HIGH; | 252 | return MODE_CLOCK_HIGH; |
222 | 253 | ||
223 | if (mode->clock < 10000) | 254 | if (mode->clock < 10000) |
@@ -279,6 +310,38 @@ intel_hrawclk(struct drm_device *dev) | |||
279 | } | 310 | } |
280 | } | 311 | } |
281 | 312 | ||
313 | static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp) | ||
314 | { | ||
315 | struct drm_device *dev = intel_dp->base.base.dev; | ||
316 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
317 | |||
318 | return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0; | ||
319 | } | ||
320 | |||
321 | static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp) | ||
322 | { | ||
323 | struct drm_device *dev = intel_dp->base.base.dev; | ||
324 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
325 | |||
326 | return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0; | ||
327 | } | ||
328 | |||
329 | static void | ||
330 | intel_dp_check_edp(struct intel_dp *intel_dp) | ||
331 | { | ||
332 | struct drm_device *dev = intel_dp->base.base.dev; | ||
333 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
334 | |||
335 | if (!is_edp(intel_dp)) | ||
336 | return; | ||
337 | if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) { | ||
338 | WARN(1, "eDP powered off while attempting aux channel communication.\n"); | ||
339 | DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n", | ||
340 | I915_READ(PCH_PP_STATUS), | ||
341 | I915_READ(PCH_PP_CONTROL)); | ||
342 | } | ||
343 | } | ||
344 | |||
282 | static int | 345 | static int |
283 | intel_dp_aux_ch(struct intel_dp *intel_dp, | 346 | intel_dp_aux_ch(struct intel_dp *intel_dp, |
284 | uint8_t *send, int send_bytes, | 347 | uint8_t *send, int send_bytes, |
@@ -295,6 +358,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
295 | uint32_t aux_clock_divider; | 358 | uint32_t aux_clock_divider; |
296 | int try, precharge; | 359 | int try, precharge; |
297 | 360 | ||
361 | intel_dp_check_edp(intel_dp); | ||
298 | /* The clock divider is based off the hrawclk, | 362 | /* The clock divider is based off the hrawclk, |
299 | * and would like to run at 2MHz. So, take the | 363 | * and would like to run at 2MHz. So, take the |
300 | * hrawclk value and divide by 2 and use that | 364 | * hrawclk value and divide by 2 and use that |
@@ -302,7 +366,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp, | |||
302 | * Note that PCH attached eDP panels should use a 125MHz input | 366 | * Note that PCH attached eDP panels should use a 125MHz input |
303 | * clock divider. | 367 | * clock divider. |
304 | */ | 368 | */ |
305 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { | 369 | if (is_cpu_edp(intel_dp)) { |
306 | if (IS_GEN6(dev)) | 370 | if (IS_GEN6(dev)) |
307 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ | 371 | aux_clock_divider = 200; /* SNB eDP input clock at 400Mhz */ |
308 | else | 372 | else |
@@ -408,6 +472,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp, | |||
408 | int msg_bytes; | 472 | int msg_bytes; |
409 | uint8_t ack; | 473 | uint8_t ack; |
410 | 474 | ||
475 | intel_dp_check_edp(intel_dp); | ||
411 | if (send_bytes > 16) | 476 | if (send_bytes > 16) |
412 | return -1; | 477 | return -1; |
413 | msg[0] = AUX_NATIVE_WRITE << 4; | 478 | msg[0] = AUX_NATIVE_WRITE << 4; |
@@ -450,6 +515,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp, | |||
450 | uint8_t ack; | 515 | uint8_t ack; |
451 | int ret; | 516 | int ret; |
452 | 517 | ||
518 | intel_dp_check_edp(intel_dp); | ||
453 | msg[0] = AUX_NATIVE_READ << 4; | 519 | msg[0] = AUX_NATIVE_READ << 4; |
454 | msg[1] = address >> 8; | 520 | msg[1] = address >> 8; |
455 | msg[2] = address & 0xff; | 521 | msg[2] = address & 0xff; |
@@ -493,6 +559,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
493 | int reply_bytes; | 559 | int reply_bytes; |
494 | int ret; | 560 | int ret; |
495 | 561 | ||
562 | intel_dp_check_edp(intel_dp); | ||
496 | /* Set up the command byte */ | 563 | /* Set up the command byte */ |
497 | if (mode & MODE_I2C_READ) | 564 | if (mode & MODE_I2C_READ) |
498 | msg[0] = AUX_I2C_READ << 4; | 565 | msg[0] = AUX_I2C_READ << 4; |
@@ -573,10 +640,15 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
573 | return -EREMOTEIO; | 640 | return -EREMOTEIO; |
574 | } | 641 | } |
575 | 642 | ||
643 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp); | ||
644 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync); | ||
645 | |||
576 | static int | 646 | static int |
577 | intel_dp_i2c_init(struct intel_dp *intel_dp, | 647 | intel_dp_i2c_init(struct intel_dp *intel_dp, |
578 | struct intel_connector *intel_connector, const char *name) | 648 | struct intel_connector *intel_connector, const char *name) |
579 | { | 649 | { |
650 | int ret; | ||
651 | |||
580 | DRM_DEBUG_KMS("i2c_init %s\n", name); | 652 | DRM_DEBUG_KMS("i2c_init %s\n", name); |
581 | intel_dp->algo.running = false; | 653 | intel_dp->algo.running = false; |
582 | intel_dp->algo.address = 0; | 654 | intel_dp->algo.address = 0; |
@@ -590,7 +662,10 @@ intel_dp_i2c_init(struct intel_dp *intel_dp, | |||
590 | intel_dp->adapter.algo_data = &intel_dp->algo; | 662 | intel_dp->adapter.algo_data = &intel_dp->algo; |
591 | intel_dp->adapter.dev.parent = &intel_connector->base.kdev; | 663 | intel_dp->adapter.dev.parent = &intel_connector->base.kdev; |
592 | 664 | ||
593 | return i2c_dp_aux_add_bus(&intel_dp->adapter); | 665 | ironlake_edp_panel_vdd_on(intel_dp); |
666 | ret = i2c_dp_aux_add_bus(&intel_dp->adapter); | ||
667 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
668 | return ret; | ||
594 | } | 669 | } |
595 | 670 | ||
596 | static bool | 671 | static bool |
@@ -598,29 +673,28 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
598 | struct drm_display_mode *adjusted_mode) | 673 | struct drm_display_mode *adjusted_mode) |
599 | { | 674 | { |
600 | struct drm_device *dev = encoder->dev; | 675 | struct drm_device *dev = encoder->dev; |
601 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
602 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 676 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
603 | int lane_count, clock; | 677 | int lane_count, clock; |
604 | int max_lane_count = intel_dp_max_lane_count(intel_dp); | 678 | int max_lane_count = intel_dp_max_lane_count(intel_dp); |
605 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; | 679 | int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0; |
606 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; | 680 | static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 }; |
607 | 681 | ||
608 | if (is_edp(intel_dp) && dev_priv->panel_fixed_mode) { | 682 | if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) { |
609 | intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode); | 683 | intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode); |
610 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, | 684 | intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN, |
611 | mode, adjusted_mode); | 685 | mode, adjusted_mode); |
612 | /* | 686 | /* |
613 | * the mode->clock is used to calculate the Data&Link M/N | 687 | * the mode->clock is used to calculate the Data&Link M/N |
614 | * of the pipe. For the eDP the fixed clock should be used. | 688 | * of the pipe. For the eDP the fixed clock should be used. |
615 | */ | 689 | */ |
616 | mode->clock = dev_priv->panel_fixed_mode->clock; | 690 | mode->clock = intel_dp->panel_fixed_mode->clock; |
617 | } | 691 | } |
618 | 692 | ||
619 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 693 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
620 | for (clock = 0; clock <= max_clock; clock++) { | 694 | for (clock = 0; clock <= max_clock; clock++) { |
621 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); | 695 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
622 | 696 | ||
623 | if (intel_dp_link_required(encoder->dev, intel_dp, mode->clock) | 697 | if (intel_dp_link_required(intel_dp, mode->clock) |
624 | <= link_avail) { | 698 | <= link_avail) { |
625 | intel_dp->link_bw = bws[clock]; | 699 | intel_dp->link_bw = bws[clock]; |
626 | intel_dp->lane_count = lane_count; | 700 | intel_dp->lane_count = lane_count; |
@@ -634,19 +708,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
634 | } | 708 | } |
635 | } | 709 | } |
636 | 710 | ||
637 | if (is_edp(intel_dp)) { | ||
638 | /* okay we failed just pick the highest */ | ||
639 | intel_dp->lane_count = max_lane_count; | ||
640 | intel_dp->link_bw = bws[max_clock]; | ||
641 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); | ||
642 | DRM_DEBUG_KMS("Force picking display port link bw %02x lane " | ||
643 | "count %d clock %d\n", | ||
644 | intel_dp->link_bw, intel_dp->lane_count, | ||
645 | adjusted_mode->clock); | ||
646 | |||
647 | return true; | ||
648 | } | ||
649 | |||
650 | return false; | 711 | return false; |
651 | } | 712 | } |
652 | 713 | ||
@@ -740,6 +801,9 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | |||
740 | } | 801 | } |
741 | } | 802 | } |
742 | 803 | ||
804 | static void ironlake_edp_pll_on(struct drm_encoder *encoder); | ||
805 | static void ironlake_edp_pll_off(struct drm_encoder *encoder); | ||
806 | |||
743 | static void | 807 | static void |
744 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | 808 | intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, |
745 | struct drm_display_mode *adjusted_mode) | 809 | struct drm_display_mode *adjusted_mode) |
@@ -749,6 +813,14 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
749 | struct drm_crtc *crtc = intel_dp->base.base.crtc; | 813 | struct drm_crtc *crtc = intel_dp->base.base.crtc; |
750 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); | 814 | struct intel_crtc *intel_crtc = to_intel_crtc(crtc); |
751 | 815 | ||
816 | /* Turn on the eDP PLL if needed */ | ||
817 | if (is_edp(intel_dp)) { | ||
818 | if (!is_pch_edp(intel_dp)) | ||
819 | ironlake_edp_pll_on(encoder); | ||
820 | else | ||
821 | ironlake_edp_pll_off(encoder); | ||
822 | } | ||
823 | |||
752 | intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; | 824 | intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; |
753 | intel_dp->DP |= intel_dp->color_range; | 825 | intel_dp->DP |= intel_dp->color_range; |
754 | 826 | ||
@@ -757,7 +829,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
757 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) | 829 | if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) |
758 | intel_dp->DP |= DP_SYNC_VS_HIGH; | 830 | intel_dp->DP |= DP_SYNC_VS_HIGH; |
759 | 831 | ||
760 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 832 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
761 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; | 833 | intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; |
762 | else | 834 | else |
763 | intel_dp->DP |= DP_LINK_TRAIN_OFF; | 835 | intel_dp->DP |= DP_LINK_TRAIN_OFF; |
@@ -798,7 +870,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
798 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) | 870 | if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev)) |
799 | intel_dp->DP |= DP_PIPEB_SELECT; | 871 | intel_dp->DP |= DP_PIPEB_SELECT; |
800 | 872 | ||
801 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) { | 873 | if (is_cpu_edp(intel_dp)) { |
802 | /* don't miss out required setting for eDP */ | 874 | /* don't miss out required setting for eDP */ |
803 | intel_dp->DP |= DP_PLL_ENABLE; | 875 | intel_dp->DP |= DP_PLL_ENABLE; |
804 | if (adjusted_mode->clock < 200000) | 876 | if (adjusted_mode->clock < 200000) |
@@ -808,58 +880,150 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
808 | } | 880 | } |
809 | } | 881 | } |
810 | 882 | ||
883 | static void ironlake_wait_panel_off(struct intel_dp *intel_dp) | ||
884 | { | ||
885 | unsigned long off_time; | ||
886 | unsigned long delay; | ||
887 | |||
888 | DRM_DEBUG_KMS("Wait for panel power off time\n"); | ||
889 | |||
890 | if (ironlake_edp_have_panel_power(intel_dp) || | ||
891 | ironlake_edp_have_panel_vdd(intel_dp)) | ||
892 | { | ||
893 | DRM_DEBUG_KMS("Panel still on, no delay needed\n"); | ||
894 | return; | ||
895 | } | ||
896 | |||
897 | off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay); | ||
898 | if (time_after(jiffies, off_time)) { | ||
899 | DRM_DEBUG_KMS("Time already passed"); | ||
900 | return; | ||
901 | } | ||
902 | delay = jiffies_to_msecs(off_time - jiffies); | ||
903 | if (delay > intel_dp->panel_power_down_delay) | ||
904 | delay = intel_dp->panel_power_down_delay; | ||
905 | DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay); | ||
906 | msleep(delay); | ||
907 | } | ||
908 | |||
811 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) | 909 | static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp) |
812 | { | 910 | { |
813 | struct drm_device *dev = intel_dp->base.base.dev; | 911 | struct drm_device *dev = intel_dp->base.base.dev; |
814 | struct drm_i915_private *dev_priv = dev->dev_private; | 912 | struct drm_i915_private *dev_priv = dev->dev_private; |
815 | u32 pp; | 913 | u32 pp; |
816 | 914 | ||
817 | /* | 915 | if (!is_edp(intel_dp)) |
818 | * If the panel wasn't on, make sure there's not a currently | 916 | return; |
819 | * active PP sequence before enabling AUX VDD. | 917 | DRM_DEBUG_KMS("Turn eDP VDD on\n"); |
820 | */ | ||
821 | if (!(I915_READ(PCH_PP_STATUS) & PP_ON)) | ||
822 | msleep(dev_priv->panel_t3); | ||
823 | 918 | ||
919 | WARN(intel_dp->want_panel_vdd, | ||
920 | "eDP VDD already requested on\n"); | ||
921 | |||
922 | intel_dp->want_panel_vdd = true; | ||
923 | if (ironlake_edp_have_panel_vdd(intel_dp)) { | ||
924 | DRM_DEBUG_KMS("eDP VDD already on\n"); | ||
925 | return; | ||
926 | } | ||
927 | |||
928 | ironlake_wait_panel_off(intel_dp); | ||
824 | pp = I915_READ(PCH_PP_CONTROL); | 929 | pp = I915_READ(PCH_PP_CONTROL); |
930 | pp &= ~PANEL_UNLOCK_MASK; | ||
931 | pp |= PANEL_UNLOCK_REGS; | ||
825 | pp |= EDP_FORCE_VDD; | 932 | pp |= EDP_FORCE_VDD; |
826 | I915_WRITE(PCH_PP_CONTROL, pp); | 933 | I915_WRITE(PCH_PP_CONTROL, pp); |
827 | POSTING_READ(PCH_PP_CONTROL); | 934 | POSTING_READ(PCH_PP_CONTROL); |
935 | DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", | ||
936 | I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); | ||
937 | |||
938 | /* | ||
939 | * If the panel wasn't on, delay before accessing aux channel | ||
940 | */ | ||
941 | if (!ironlake_edp_have_panel_power(intel_dp)) { | ||
942 | DRM_DEBUG_KMS("eDP was not running\n"); | ||
943 | msleep(intel_dp->panel_power_up_delay); | ||
944 | } | ||
828 | } | 945 | } |
829 | 946 | ||
830 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp) | 947 | static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp) |
831 | { | 948 | { |
832 | struct drm_device *dev = intel_dp->base.base.dev; | 949 | struct drm_device *dev = intel_dp->base.base.dev; |
833 | struct drm_i915_private *dev_priv = dev->dev_private; | 950 | struct drm_i915_private *dev_priv = dev->dev_private; |
834 | u32 pp; | 951 | u32 pp; |
835 | 952 | ||
836 | pp = I915_READ(PCH_PP_CONTROL); | 953 | if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) { |
837 | pp &= ~EDP_FORCE_VDD; | 954 | pp = I915_READ(PCH_PP_CONTROL); |
838 | I915_WRITE(PCH_PP_CONTROL, pp); | 955 | pp &= ~PANEL_UNLOCK_MASK; |
839 | POSTING_READ(PCH_PP_CONTROL); | 956 | pp |= PANEL_UNLOCK_REGS; |
957 | pp &= ~EDP_FORCE_VDD; | ||
958 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
959 | POSTING_READ(PCH_PP_CONTROL); | ||
960 | |||
961 | /* Make sure sequencer is idle before allowing subsequent activity */ | ||
962 | DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n", | ||
963 | I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL)); | ||
964 | intel_dp->panel_off_jiffies = jiffies; | ||
965 | } | ||
966 | } | ||
840 | 967 | ||
841 | /* Make sure sequencer is idle before allowing subsequent activity */ | 968 | static void ironlake_panel_vdd_work(struct work_struct *__work) |
842 | msleep(dev_priv->panel_t12); | 969 | { |
970 | struct intel_dp *intel_dp = container_of(to_delayed_work(__work), | ||
971 | struct intel_dp, panel_vdd_work); | ||
972 | struct drm_device *dev = intel_dp->base.base.dev; | ||
973 | |||
974 | mutex_lock(&dev->struct_mutex); | ||
975 | ironlake_panel_vdd_off_sync(intel_dp); | ||
976 | mutex_unlock(&dev->struct_mutex); | ||
977 | } | ||
978 | |||
979 | static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync) | ||
980 | { | ||
981 | if (!is_edp(intel_dp)) | ||
982 | return; | ||
983 | |||
984 | DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd); | ||
985 | WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on"); | ||
986 | |||
987 | intel_dp->want_panel_vdd = false; | ||
988 | |||
989 | if (sync) { | ||
990 | ironlake_panel_vdd_off_sync(intel_dp); | ||
991 | } else { | ||
992 | /* | ||
993 | * Queue the timer to fire a long | ||
994 | * time from now (relative to the power down delay) | ||
995 | * to keep the panel power up across a sequence of operations | ||
996 | */ | ||
997 | schedule_delayed_work(&intel_dp->panel_vdd_work, | ||
998 | msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5)); | ||
999 | } | ||
843 | } | 1000 | } |
844 | 1001 | ||
845 | /* Returns true if the panel was already on when called */ | 1002 | /* Returns true if the panel was already on when called */ |
846 | static bool ironlake_edp_panel_on(struct intel_dp *intel_dp) | 1003 | static void ironlake_edp_panel_on(struct intel_dp *intel_dp) |
847 | { | 1004 | { |
848 | struct drm_device *dev = intel_dp->base.base.dev; | 1005 | struct drm_device *dev = intel_dp->base.base.dev; |
849 | struct drm_i915_private *dev_priv = dev->dev_private; | 1006 | struct drm_i915_private *dev_priv = dev->dev_private; |
850 | u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; | 1007 | u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE; |
851 | 1008 | ||
852 | if (I915_READ(PCH_PP_STATUS) & PP_ON) | 1009 | if (!is_edp(intel_dp)) |
853 | return true; | 1010 | return; |
1011 | if (ironlake_edp_have_panel_power(intel_dp)) | ||
1012 | return; | ||
854 | 1013 | ||
1014 | ironlake_wait_panel_off(intel_dp); | ||
855 | pp = I915_READ(PCH_PP_CONTROL); | 1015 | pp = I915_READ(PCH_PP_CONTROL); |
1016 | pp &= ~PANEL_UNLOCK_MASK; | ||
1017 | pp |= PANEL_UNLOCK_REGS; | ||
1018 | |||
1019 | if (IS_GEN5(dev)) { | ||
1020 | /* ILK workaround: disable reset around power sequence */ | ||
1021 | pp &= ~PANEL_POWER_RESET; | ||
1022 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
1023 | POSTING_READ(PCH_PP_CONTROL); | ||
1024 | } | ||
856 | 1025 | ||
857 | /* ILK workaround: disable reset around power sequence */ | 1026 | pp |= POWER_TARGET_ON; |
858 | pp &= ~PANEL_POWER_RESET; | ||
859 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
860 | POSTING_READ(PCH_PP_CONTROL); | ||
861 | |||
862 | pp |= PANEL_UNLOCK_REGS | POWER_TARGET_ON; | ||
863 | I915_WRITE(PCH_PP_CONTROL, pp); | 1027 | I915_WRITE(PCH_PP_CONTROL, pp); |
864 | POSTING_READ(PCH_PP_CONTROL); | 1028 | POSTING_READ(PCH_PP_CONTROL); |
865 | 1029 | ||
@@ -868,44 +1032,64 @@ static bool ironlake_edp_panel_on(struct intel_dp *intel_dp) | |||
868 | DRM_ERROR("panel on wait timed out: 0x%08x\n", | 1032 | DRM_ERROR("panel on wait timed out: 0x%08x\n", |
869 | I915_READ(PCH_PP_STATUS)); | 1033 | I915_READ(PCH_PP_STATUS)); |
870 | 1034 | ||
871 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 1035 | if (IS_GEN5(dev)) { |
872 | I915_WRITE(PCH_PP_CONTROL, pp); | 1036 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
873 | POSTING_READ(PCH_PP_CONTROL); | 1037 | I915_WRITE(PCH_PP_CONTROL, pp); |
874 | 1038 | POSTING_READ(PCH_PP_CONTROL); | |
875 | return false; | 1039 | } |
876 | } | 1040 | } |
877 | 1041 | ||
878 | static void ironlake_edp_panel_off(struct drm_device *dev) | 1042 | static void ironlake_edp_panel_off(struct drm_encoder *encoder) |
879 | { | 1043 | { |
1044 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | ||
1045 | struct drm_device *dev = encoder->dev; | ||
880 | struct drm_i915_private *dev_priv = dev->dev_private; | 1046 | struct drm_i915_private *dev_priv = dev->dev_private; |
881 | u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | | 1047 | u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK | |
882 | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; | 1048 | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK; |
883 | 1049 | ||
1050 | if (!is_edp(intel_dp)) | ||
1051 | return; | ||
884 | pp = I915_READ(PCH_PP_CONTROL); | 1052 | pp = I915_READ(PCH_PP_CONTROL); |
1053 | pp &= ~PANEL_UNLOCK_MASK; | ||
1054 | pp |= PANEL_UNLOCK_REGS; | ||
1055 | |||
1056 | if (IS_GEN5(dev)) { | ||
1057 | /* ILK workaround: disable reset around power sequence */ | ||
1058 | pp &= ~PANEL_POWER_RESET; | ||
1059 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
1060 | POSTING_READ(PCH_PP_CONTROL); | ||
1061 | } | ||
885 | 1062 | ||
886 | /* ILK workaround: disable reset around power sequence */ | 1063 | intel_dp->panel_off_jiffies = jiffies; |
887 | pp &= ~PANEL_POWER_RESET; | ||
888 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
889 | POSTING_READ(PCH_PP_CONTROL); | ||
890 | 1064 | ||
891 | pp &= ~POWER_TARGET_ON; | 1065 | if (IS_GEN5(dev)) { |
892 | I915_WRITE(PCH_PP_CONTROL, pp); | 1066 | pp &= ~POWER_TARGET_ON; |
893 | POSTING_READ(PCH_PP_CONTROL); | 1067 | I915_WRITE(PCH_PP_CONTROL, pp); |
1068 | POSTING_READ(PCH_PP_CONTROL); | ||
1069 | pp &= ~POWER_TARGET_ON; | ||
1070 | I915_WRITE(PCH_PP_CONTROL, pp); | ||
1071 | POSTING_READ(PCH_PP_CONTROL); | ||
1072 | msleep(intel_dp->panel_power_cycle_delay); | ||
894 | 1073 | ||
895 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) | 1074 | if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000)) |
896 | DRM_ERROR("panel off wait timed out: 0x%08x\n", | 1075 | DRM_ERROR("panel off wait timed out: 0x%08x\n", |
897 | I915_READ(PCH_PP_STATUS)); | 1076 | I915_READ(PCH_PP_STATUS)); |
898 | 1077 | ||
899 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ | 1078 | pp |= PANEL_POWER_RESET; /* restore panel reset bit */ |
900 | I915_WRITE(PCH_PP_CONTROL, pp); | 1079 | I915_WRITE(PCH_PP_CONTROL, pp); |
901 | POSTING_READ(PCH_PP_CONTROL); | 1080 | POSTING_READ(PCH_PP_CONTROL); |
1081 | } | ||
902 | } | 1082 | } |
903 | 1083 | ||
904 | static void ironlake_edp_backlight_on(struct drm_device *dev) | 1084 | static void ironlake_edp_backlight_on(struct intel_dp *intel_dp) |
905 | { | 1085 | { |
1086 | struct drm_device *dev = intel_dp->base.base.dev; | ||
906 | struct drm_i915_private *dev_priv = dev->dev_private; | 1087 | struct drm_i915_private *dev_priv = dev->dev_private; |
907 | u32 pp; | 1088 | u32 pp; |
908 | 1089 | ||
1090 | if (!is_edp(intel_dp)) | ||
1091 | return; | ||
1092 | |||
909 | DRM_DEBUG_KMS("\n"); | 1093 | DRM_DEBUG_KMS("\n"); |
910 | /* | 1094 | /* |
911 | * If we enable the backlight right away following a panel power | 1095 | * If we enable the backlight right away following a panel power |
@@ -913,21 +1097,32 @@ static void ironlake_edp_backlight_on(struct drm_device *dev) | |||
913 | * link. So delay a bit to make sure the image is solid before | 1097 | * link. So delay a bit to make sure the image is solid before |
914 | * allowing it to appear. | 1098 | * allowing it to appear. |
915 | */ | 1099 | */ |
916 | msleep(300); | 1100 | msleep(intel_dp->backlight_on_delay); |
917 | pp = I915_READ(PCH_PP_CONTROL); | 1101 | pp = I915_READ(PCH_PP_CONTROL); |
1102 | pp &= ~PANEL_UNLOCK_MASK; | ||
1103 | pp |= PANEL_UNLOCK_REGS; | ||
918 | pp |= EDP_BLC_ENABLE; | 1104 | pp |= EDP_BLC_ENABLE; |
919 | I915_WRITE(PCH_PP_CONTROL, pp); | 1105 | I915_WRITE(PCH_PP_CONTROL, pp); |
1106 | POSTING_READ(PCH_PP_CONTROL); | ||
920 | } | 1107 | } |
921 | 1108 | ||
922 | static void ironlake_edp_backlight_off(struct drm_device *dev) | 1109 | static void ironlake_edp_backlight_off(struct intel_dp *intel_dp) |
923 | { | 1110 | { |
1111 | struct drm_device *dev = intel_dp->base.base.dev; | ||
924 | struct drm_i915_private *dev_priv = dev->dev_private; | 1112 | struct drm_i915_private *dev_priv = dev->dev_private; |
925 | u32 pp; | 1113 | u32 pp; |
926 | 1114 | ||
1115 | if (!is_edp(intel_dp)) | ||
1116 | return; | ||
1117 | |||
927 | DRM_DEBUG_KMS("\n"); | 1118 | DRM_DEBUG_KMS("\n"); |
928 | pp = I915_READ(PCH_PP_CONTROL); | 1119 | pp = I915_READ(PCH_PP_CONTROL); |
1120 | pp &= ~PANEL_UNLOCK_MASK; | ||
1121 | pp |= PANEL_UNLOCK_REGS; | ||
929 | pp &= ~EDP_BLC_ENABLE; | 1122 | pp &= ~EDP_BLC_ENABLE; |
930 | I915_WRITE(PCH_PP_CONTROL, pp); | 1123 | I915_WRITE(PCH_PP_CONTROL, pp); |
1124 | POSTING_READ(PCH_PP_CONTROL); | ||
1125 | msleep(intel_dp->backlight_off_delay); | ||
931 | } | 1126 | } |
932 | 1127 | ||
933 | static void ironlake_edp_pll_on(struct drm_encoder *encoder) | 1128 | static void ironlake_edp_pll_on(struct drm_encoder *encoder) |
@@ -990,43 +1185,39 @@ static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode) | |||
990 | static void intel_dp_prepare(struct drm_encoder *encoder) | 1185 | static void intel_dp_prepare(struct drm_encoder *encoder) |
991 | { | 1186 | { |
992 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1187 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
993 | struct drm_device *dev = encoder->dev; | ||
994 | 1188 | ||
995 | /* Wake up the sink first */ | 1189 | /* Wake up the sink first */ |
1190 | ironlake_edp_panel_vdd_on(intel_dp); | ||
996 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); | 1191 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); |
1192 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
997 | 1193 | ||
998 | if (is_edp(intel_dp)) { | 1194 | /* Make sure the panel is off before trying to |
999 | ironlake_edp_backlight_off(dev); | 1195 | * change the mode |
1000 | ironlake_edp_panel_off(dev); | 1196 | */ |
1001 | if (!is_pch_edp(intel_dp)) | 1197 | ironlake_edp_backlight_off(intel_dp); |
1002 | ironlake_edp_pll_on(encoder); | ||
1003 | else | ||
1004 | ironlake_edp_pll_off(encoder); | ||
1005 | } | ||
1006 | intel_dp_link_down(intel_dp); | 1198 | intel_dp_link_down(intel_dp); |
1199 | ironlake_edp_panel_off(encoder); | ||
1007 | } | 1200 | } |
1008 | 1201 | ||
1009 | static void intel_dp_commit(struct drm_encoder *encoder) | 1202 | static void intel_dp_commit(struct drm_encoder *encoder) |
1010 | { | 1203 | { |
1011 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); | 1204 | struct intel_dp *intel_dp = enc_to_intel_dp(encoder); |
1012 | struct drm_device *dev = encoder->dev; | 1205 | struct drm_device *dev = encoder->dev; |
1206 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); | ||
1013 | 1207 | ||
1014 | if (is_edp(intel_dp)) | 1208 | ironlake_edp_panel_vdd_on(intel_dp); |
1015 | ironlake_edp_panel_vdd_on(intel_dp); | 1209 | intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON); |
1016 | |||
1017 | intel_dp_start_link_train(intel_dp); | 1210 | intel_dp_start_link_train(intel_dp); |
1018 | 1211 | ironlake_edp_panel_on(intel_dp); | |
1019 | if (is_edp(intel_dp)) { | 1212 | ironlake_edp_panel_vdd_off(intel_dp, true); |
1020 | ironlake_edp_panel_on(intel_dp); | ||
1021 | ironlake_edp_panel_vdd_off(intel_dp); | ||
1022 | } | ||
1023 | 1213 | ||
1024 | intel_dp_complete_link_train(intel_dp); | 1214 | intel_dp_complete_link_train(intel_dp); |
1025 | 1215 | ironlake_edp_backlight_on(intel_dp); | |
1026 | if (is_edp(intel_dp)) | ||
1027 | ironlake_edp_backlight_on(dev); | ||
1028 | 1216 | ||
1029 | intel_dp->dpms_mode = DRM_MODE_DPMS_ON; | 1217 | intel_dp->dpms_mode = DRM_MODE_DPMS_ON; |
1218 | |||
1219 | if (HAS_PCH_CPT(dev)) | ||
1220 | intel_cpt_verify_modeset(dev, intel_crtc->pipe); | ||
1030 | } | 1221 | } |
1031 | 1222 | ||
1032 | static void | 1223 | static void |
@@ -1038,28 +1229,27 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode) | |||
1038 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); | 1229 | uint32_t dp_reg = I915_READ(intel_dp->output_reg); |
1039 | 1230 | ||
1040 | if (mode != DRM_MODE_DPMS_ON) { | 1231 | if (mode != DRM_MODE_DPMS_ON) { |
1232 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1041 | if (is_edp(intel_dp)) | 1233 | if (is_edp(intel_dp)) |
1042 | ironlake_edp_backlight_off(dev); | 1234 | ironlake_edp_backlight_off(intel_dp); |
1043 | intel_dp_sink_dpms(intel_dp, mode); | 1235 | intel_dp_sink_dpms(intel_dp, mode); |
1044 | intel_dp_link_down(intel_dp); | 1236 | intel_dp_link_down(intel_dp); |
1045 | if (is_edp(intel_dp)) | 1237 | ironlake_edp_panel_off(encoder); |
1046 | ironlake_edp_panel_off(dev); | ||
1047 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) | 1238 | if (is_edp(intel_dp) && !is_pch_edp(intel_dp)) |
1048 | ironlake_edp_pll_off(encoder); | 1239 | ironlake_edp_pll_off(encoder); |
1240 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
1049 | } else { | 1241 | } else { |
1050 | if (is_edp(intel_dp)) | 1242 | ironlake_edp_panel_vdd_on(intel_dp); |
1051 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1052 | intel_dp_sink_dpms(intel_dp, mode); | 1243 | intel_dp_sink_dpms(intel_dp, mode); |
1053 | if (!(dp_reg & DP_PORT_EN)) { | 1244 | if (!(dp_reg & DP_PORT_EN)) { |
1054 | intel_dp_start_link_train(intel_dp); | 1245 | intel_dp_start_link_train(intel_dp); |
1055 | if (is_edp(intel_dp)) { | 1246 | ironlake_edp_panel_on(intel_dp); |
1056 | ironlake_edp_panel_on(intel_dp); | 1247 | ironlake_edp_panel_vdd_off(intel_dp, true); |
1057 | ironlake_edp_panel_vdd_off(intel_dp); | ||
1058 | } | ||
1059 | intel_dp_complete_link_train(intel_dp); | 1248 | intel_dp_complete_link_train(intel_dp); |
1060 | } | 1249 | ironlake_edp_backlight_on(intel_dp); |
1061 | if (is_edp(intel_dp)) | 1250 | } else |
1062 | ironlake_edp_backlight_on(dev); | 1251 | ironlake_edp_panel_vdd_off(intel_dp, false); |
1252 | ironlake_edp_backlight_on(intel_dp); | ||
1063 | } | 1253 | } |
1064 | intel_dp->dpms_mode = mode; | 1254 | intel_dp->dpms_mode = mode; |
1065 | } | 1255 | } |
@@ -1368,7 +1558,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1368 | DP_LINK_CONFIGURATION_SIZE); | 1558 | DP_LINK_CONFIGURATION_SIZE); |
1369 | 1559 | ||
1370 | DP |= DP_PORT_EN; | 1560 | DP |= DP_PORT_EN; |
1371 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1561 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1372 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1562 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1373 | else | 1563 | else |
1374 | DP &= ~DP_LINK_TRAIN_MASK; | 1564 | DP &= ~DP_LINK_TRAIN_MASK; |
@@ -1387,7 +1577,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1387 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1577 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1388 | } | 1578 | } |
1389 | 1579 | ||
1390 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1580 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1391 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; | 1581 | reg = DP | DP_LINK_TRAIN_PAT_1_CPT; |
1392 | else | 1582 | else |
1393 | reg = DP | DP_LINK_TRAIN_PAT_1; | 1583 | reg = DP | DP_LINK_TRAIN_PAT_1; |
@@ -1462,7 +1652,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1462 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1652 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1463 | } | 1653 | } |
1464 | 1654 | ||
1465 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1655 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1466 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; | 1656 | reg = DP | DP_LINK_TRAIN_PAT_2_CPT; |
1467 | else | 1657 | else |
1468 | reg = DP | DP_LINK_TRAIN_PAT_2; | 1658 | reg = DP | DP_LINK_TRAIN_PAT_2; |
@@ -1503,7 +1693,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1503 | ++tries; | 1693 | ++tries; |
1504 | } | 1694 | } |
1505 | 1695 | ||
1506 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1696 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) |
1507 | reg = DP | DP_LINK_TRAIN_OFF_CPT; | 1697 | reg = DP | DP_LINK_TRAIN_OFF_CPT; |
1508 | else | 1698 | else |
1509 | reg = DP | DP_LINK_TRAIN_OFF; | 1699 | reg = DP | DP_LINK_TRAIN_OFF; |
@@ -1533,7 +1723,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1533 | udelay(100); | 1723 | udelay(100); |
1534 | } | 1724 | } |
1535 | 1725 | ||
1536 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) { | 1726 | if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) { |
1537 | DP &= ~DP_LINK_TRAIN_MASK_CPT; | 1727 | DP &= ~DP_LINK_TRAIN_MASK_CPT; |
1538 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); | 1728 | I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT); |
1539 | } else { | 1729 | } else { |
@@ -1582,6 +1772,7 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1582 | 1772 | ||
1583 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); | 1773 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); |
1584 | POSTING_READ(intel_dp->output_reg); | 1774 | POSTING_READ(intel_dp->output_reg); |
1775 | msleep(intel_dp->panel_power_down_delay); | ||
1585 | } | 1776 | } |
1586 | 1777 | ||
1587 | static bool | 1778 | static bool |
@@ -1596,6 +1787,27 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
1596 | return false; | 1787 | return false; |
1597 | } | 1788 | } |
1598 | 1789 | ||
1790 | static bool | ||
1791 | intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector) | ||
1792 | { | ||
1793 | int ret; | ||
1794 | |||
1795 | ret = intel_dp_aux_native_read_retry(intel_dp, | ||
1796 | DP_DEVICE_SERVICE_IRQ_VECTOR, | ||
1797 | sink_irq_vector, 1); | ||
1798 | if (!ret) | ||
1799 | return false; | ||
1800 | |||
1801 | return true; | ||
1802 | } | ||
1803 | |||
1804 | static void | ||
1805 | intel_dp_handle_test_request(struct intel_dp *intel_dp) | ||
1806 | { | ||
1807 | /* NAK by default */ | ||
1808 | intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK); | ||
1809 | } | ||
1810 | |||
1599 | /* | 1811 | /* |
1600 | * According to DP spec | 1812 | * According to DP spec |
1601 | * 5.1.2: | 1813 | * 5.1.2: |
@@ -1608,6 +1820,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp) | |||
1608 | static void | 1820 | static void |
1609 | intel_dp_check_link_status(struct intel_dp *intel_dp) | 1821 | intel_dp_check_link_status(struct intel_dp *intel_dp) |
1610 | { | 1822 | { |
1823 | u8 sink_irq_vector; | ||
1824 | |||
1611 | if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) | 1825 | if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) |
1612 | return; | 1826 | return; |
1613 | 1827 | ||
@@ -1626,6 +1840,20 @@ intel_dp_check_link_status(struct intel_dp *intel_dp) | |||
1626 | return; | 1840 | return; |
1627 | } | 1841 | } |
1628 | 1842 | ||
1843 | /* Try to read the source of the interrupt */ | ||
1844 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && | ||
1845 | intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) { | ||
1846 | /* Clear interrupt source */ | ||
1847 | intel_dp_aux_native_write_1(intel_dp, | ||
1848 | DP_DEVICE_SERVICE_IRQ_VECTOR, | ||
1849 | sink_irq_vector); | ||
1850 | |||
1851 | if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST) | ||
1852 | intel_dp_handle_test_request(intel_dp); | ||
1853 | if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ)) | ||
1854 | DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); | ||
1855 | } | ||
1856 | |||
1629 | if (!intel_channel_eq_ok(intel_dp)) { | 1857 | if (!intel_channel_eq_ok(intel_dp)) { |
1630 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", | 1858 | DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", |
1631 | drm_get_encoder_name(&intel_dp->base.base)); | 1859 | drm_get_encoder_name(&intel_dp->base.base)); |
@@ -1687,6 +1915,31 @@ g4x_dp_detect(struct intel_dp *intel_dp) | |||
1687 | return intel_dp_detect_dpcd(intel_dp); | 1915 | return intel_dp_detect_dpcd(intel_dp); |
1688 | } | 1916 | } |
1689 | 1917 | ||
1918 | static struct edid * | ||
1919 | intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter) | ||
1920 | { | ||
1921 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1922 | struct edid *edid; | ||
1923 | |||
1924 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1925 | edid = drm_get_edid(connector, adapter); | ||
1926 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
1927 | return edid; | ||
1928 | } | ||
1929 | |||
1930 | static int | ||
1931 | intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter) | ||
1932 | { | ||
1933 | struct intel_dp *intel_dp = intel_attached_dp(connector); | ||
1934 | int ret; | ||
1935 | |||
1936 | ironlake_edp_panel_vdd_on(intel_dp); | ||
1937 | ret = intel_ddc_get_modes(connector, adapter); | ||
1938 | ironlake_edp_panel_vdd_off(intel_dp, false); | ||
1939 | return ret; | ||
1940 | } | ||
1941 | |||
1942 | |||
1690 | /** | 1943 | /** |
1691 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. | 1944 | * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection. |
1692 | * | 1945 | * |
@@ -1719,7 +1972,7 @@ intel_dp_detect(struct drm_connector *connector, bool force) | |||
1719 | if (intel_dp->force_audio) { | 1972 | if (intel_dp->force_audio) { |
1720 | intel_dp->has_audio = intel_dp->force_audio > 0; | 1973 | intel_dp->has_audio = intel_dp->force_audio > 0; |
1721 | } else { | 1974 | } else { |
1722 | edid = drm_get_edid(connector, &intel_dp->adapter); | 1975 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
1723 | if (edid) { | 1976 | if (edid) { |
1724 | intel_dp->has_audio = drm_detect_monitor_audio(edid); | 1977 | intel_dp->has_audio = drm_detect_monitor_audio(edid); |
1725 | connector->display_info.raw_edid = NULL; | 1978 | connector->display_info.raw_edid = NULL; |
@@ -1740,28 +1993,36 @@ static int intel_dp_get_modes(struct drm_connector *connector) | |||
1740 | /* We should parse the EDID data and find out if it has an audio sink | 1993 | /* We should parse the EDID data and find out if it has an audio sink |
1741 | */ | 1994 | */ |
1742 | 1995 | ||
1743 | ret = intel_ddc_get_modes(connector, &intel_dp->adapter); | 1996 | ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter); |
1744 | if (ret) { | 1997 | if (ret) { |
1745 | if (is_edp(intel_dp) && !dev_priv->panel_fixed_mode) { | 1998 | if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) { |
1746 | struct drm_display_mode *newmode; | 1999 | struct drm_display_mode *newmode; |
1747 | list_for_each_entry(newmode, &connector->probed_modes, | 2000 | list_for_each_entry(newmode, &connector->probed_modes, |
1748 | head) { | 2001 | head) { |
1749 | if (newmode->type & DRM_MODE_TYPE_PREFERRED) { | 2002 | if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) { |
1750 | dev_priv->panel_fixed_mode = | 2003 | intel_dp->panel_fixed_mode = |
1751 | drm_mode_duplicate(dev, newmode); | 2004 | drm_mode_duplicate(dev, newmode); |
1752 | break; | 2005 | break; |
1753 | } | 2006 | } |
1754 | } | 2007 | } |
1755 | } | 2008 | } |
1756 | |||
1757 | return ret; | 2009 | return ret; |
1758 | } | 2010 | } |
1759 | 2011 | ||
1760 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ | 2012 | /* if eDP has no EDID, try to use fixed panel mode from VBT */ |
1761 | if (is_edp(intel_dp)) { | 2013 | if (is_edp(intel_dp)) { |
1762 | if (dev_priv->panel_fixed_mode != NULL) { | 2014 | /* initialize panel mode from VBT if available for eDP */ |
2015 | if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) { | ||
2016 | intel_dp->panel_fixed_mode = | ||
2017 | drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); | ||
2018 | if (intel_dp->panel_fixed_mode) { | ||
2019 | intel_dp->panel_fixed_mode->type |= | ||
2020 | DRM_MODE_TYPE_PREFERRED; | ||
2021 | } | ||
2022 | } | ||
2023 | if (intel_dp->panel_fixed_mode) { | ||
1763 | struct drm_display_mode *mode; | 2024 | struct drm_display_mode *mode; |
1764 | mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); | 2025 | mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode); |
1765 | drm_mode_probed_add(connector, mode); | 2026 | drm_mode_probed_add(connector, mode); |
1766 | return 1; | 2027 | return 1; |
1767 | } | 2028 | } |
@@ -1776,7 +2037,7 @@ intel_dp_detect_audio(struct drm_connector *connector) | |||
1776 | struct edid *edid; | 2037 | struct edid *edid; |
1777 | bool has_audio = false; | 2038 | bool has_audio = false; |
1778 | 2039 | ||
1779 | edid = drm_get_edid(connector, &intel_dp->adapter); | 2040 | edid = intel_dp_get_edid(connector, &intel_dp->adapter); |
1780 | if (edid) { | 2041 | if (edid) { |
1781 | has_audio = drm_detect_monitor_audio(edid); | 2042 | has_audio = drm_detect_monitor_audio(edid); |
1782 | 2043 | ||
@@ -1861,6 +2122,10 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder) | |||
1861 | 2122 | ||
1862 | i2c_del_adapter(&intel_dp->adapter); | 2123 | i2c_del_adapter(&intel_dp->adapter); |
1863 | drm_encoder_cleanup(encoder); | 2124 | drm_encoder_cleanup(encoder); |
2125 | if (is_edp(intel_dp)) { | ||
2126 | cancel_delayed_work_sync(&intel_dp->panel_vdd_work); | ||
2127 | ironlake_panel_vdd_off_sync(intel_dp); | ||
2128 | } | ||
1864 | kfree(intel_dp); | 2129 | kfree(intel_dp); |
1865 | } | 2130 | } |
1866 | 2131 | ||
@@ -1997,10 +2262,13 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
1997 | else if (output_reg == DP_D || output_reg == PCH_DP_D) | 2262 | else if (output_reg == DP_D || output_reg == PCH_DP_D) |
1998 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); | 2263 | intel_encoder->clone_mask = (1 << INTEL_DP_D_CLONE_BIT); |
1999 | 2264 | ||
2000 | if (is_edp(intel_dp)) | 2265 | if (is_edp(intel_dp)) { |
2001 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); | 2266 | intel_encoder->clone_mask = (1 << INTEL_EDP_CLONE_BIT); |
2267 | INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, | ||
2268 | ironlake_panel_vdd_work); | ||
2269 | } | ||
2002 | 2270 | ||
2003 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 2271 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
2004 | connector->interlace_allowed = true; | 2272 | connector->interlace_allowed = true; |
2005 | connector->doublescan_allowed = 0; | 2273 | connector->doublescan_allowed = 0; |
2006 | 2274 | ||
@@ -2036,25 +2304,60 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2036 | break; | 2304 | break; |
2037 | } | 2305 | } |
2038 | 2306 | ||
2039 | intel_dp_i2c_init(intel_dp, intel_connector, name); | ||
2040 | |||
2041 | /* Cache some DPCD data in the eDP case */ | 2307 | /* Cache some DPCD data in the eDP case */ |
2042 | if (is_edp(intel_dp)) { | 2308 | if (is_edp(intel_dp)) { |
2043 | bool ret; | 2309 | bool ret; |
2044 | u32 pp_on, pp_div; | 2310 | struct edp_power_seq cur, vbt; |
2311 | u32 pp_on, pp_off, pp_div; | ||
2045 | 2312 | ||
2046 | pp_on = I915_READ(PCH_PP_ON_DELAYS); | 2313 | pp_on = I915_READ(PCH_PP_ON_DELAYS); |
2314 | pp_off = I915_READ(PCH_PP_OFF_DELAYS); | ||
2047 | pp_div = I915_READ(PCH_PP_DIVISOR); | 2315 | pp_div = I915_READ(PCH_PP_DIVISOR); |
2048 | 2316 | ||
2049 | /* Get T3 & T12 values (note: VESA not bspec terminology) */ | 2317 | /* Pull timing values out of registers */ |
2050 | dev_priv->panel_t3 = (pp_on & 0x1fff0000) >> 16; | 2318 | cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> |
2051 | dev_priv->panel_t3 /= 10; /* t3 in 100us units */ | 2319 | PANEL_POWER_UP_DELAY_SHIFT; |
2052 | dev_priv->panel_t12 = pp_div & 0xf; | 2320 | |
2053 | dev_priv->panel_t12 *= 100; /* t12 in 100ms units */ | 2321 | cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> |
2322 | PANEL_LIGHT_ON_DELAY_SHIFT; | ||
2323 | |||
2324 | cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> | ||
2325 | PANEL_LIGHT_OFF_DELAY_SHIFT; | ||
2326 | |||
2327 | cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> | ||
2328 | PANEL_POWER_DOWN_DELAY_SHIFT; | ||
2329 | |||
2330 | cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> | ||
2331 | PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; | ||
2332 | |||
2333 | DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", | ||
2334 | cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); | ||
2335 | |||
2336 | vbt = dev_priv->edp.pps; | ||
2337 | |||
2338 | DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", | ||
2339 | vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); | ||
2340 | |||
2341 | #define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10) | ||
2342 | |||
2343 | intel_dp->panel_power_up_delay = get_delay(t1_t3); | ||
2344 | intel_dp->backlight_on_delay = get_delay(t8); | ||
2345 | intel_dp->backlight_off_delay = get_delay(t9); | ||
2346 | intel_dp->panel_power_down_delay = get_delay(t10); | ||
2347 | intel_dp->panel_power_cycle_delay = get_delay(t11_t12); | ||
2348 | |||
2349 | DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", | ||
2350 | intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, | ||
2351 | intel_dp->panel_power_cycle_delay); | ||
2352 | |||
2353 | DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", | ||
2354 | intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); | ||
2355 | |||
2356 | intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay; | ||
2054 | 2357 | ||
2055 | ironlake_edp_panel_vdd_on(intel_dp); | 2358 | ironlake_edp_panel_vdd_on(intel_dp); |
2056 | ret = intel_dp_get_dpcd(intel_dp); | 2359 | ret = intel_dp_get_dpcd(intel_dp); |
2057 | ironlake_edp_panel_vdd_off(intel_dp); | 2360 | ironlake_edp_panel_vdd_off(intel_dp, false); |
2058 | if (ret) { | 2361 | if (ret) { |
2059 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) | 2362 | if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) |
2060 | dev_priv->no_aux_handshake = | 2363 | dev_priv->no_aux_handshake = |
@@ -2069,18 +2372,11 @@ intel_dp_init(struct drm_device *dev, int output_reg) | |||
2069 | } | 2372 | } |
2070 | } | 2373 | } |
2071 | 2374 | ||
2375 | intel_dp_i2c_init(intel_dp, intel_connector, name); | ||
2376 | |||
2072 | intel_encoder->hot_plug = intel_dp_hot_plug; | 2377 | intel_encoder->hot_plug = intel_dp_hot_plug; |
2073 | 2378 | ||
2074 | if (is_edp(intel_dp)) { | 2379 | if (is_edp(intel_dp)) { |
2075 | /* initialize panel mode from VBT if available for eDP */ | ||
2076 | if (dev_priv->lfp_lvds_vbt_mode) { | ||
2077 | dev_priv->panel_fixed_mode = | ||
2078 | drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode); | ||
2079 | if (dev_priv->panel_fixed_mode) { | ||
2080 | dev_priv->panel_fixed_mode->type |= | ||
2081 | DRM_MODE_TYPE_PREFERRED; | ||
2082 | } | ||
2083 | } | ||
2084 | dev_priv->int_edp_connector = connector; | 2380 | dev_priv->int_edp_connector = connector; |
2085 | intel_panel_setup_backlight(dev); | 2381 | intel_panel_setup_backlight(dev); |
2086 | } | 2382 | } |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 98044d626a8d..bd9a604b73da 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -171,6 +171,9 @@ struct intel_crtc { | |||
171 | int16_t cursor_width, cursor_height; | 171 | int16_t cursor_width, cursor_height; |
172 | bool cursor_visible; | 172 | bool cursor_visible; |
173 | unsigned int bpp; | 173 | unsigned int bpp; |
174 | |||
175 | bool no_pll; /* tertiary pipe for IVB */ | ||
176 | bool use_pll_a; | ||
174 | }; | 177 | }; |
175 | 178 | ||
176 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) | 179 | #define to_intel_crtc(x) container_of(x, struct intel_crtc, base) |
@@ -184,7 +187,7 @@ struct intel_crtc { | |||
184 | #define DIP_VERSION_AVI 0x2 | 187 | #define DIP_VERSION_AVI 0x2 |
185 | #define DIP_LEN_AVI 13 | 188 | #define DIP_LEN_AVI 13 |
186 | 189 | ||
187 | #define DIP_TYPE_SPD 0x3 | 190 | #define DIP_TYPE_SPD 0x83 |
188 | #define DIP_VERSION_SPD 0x1 | 191 | #define DIP_VERSION_SPD 0x1 |
189 | #define DIP_LEN_SPD 25 | 192 | #define DIP_LEN_SPD 25 |
190 | #define DIP_SPD_UNKNOWN 0 | 193 | #define DIP_SPD_UNKNOWN 0 |
@@ -379,4 +382,6 @@ extern void intel_fb_restore_mode(struct drm_device *dev); | |||
379 | extern void intel_init_clock_gating(struct drm_device *dev); | 382 | extern void intel_init_clock_gating(struct drm_device *dev); |
380 | extern void intel_write_eld(struct drm_encoder *encoder, | 383 | extern void intel_write_eld(struct drm_encoder *encoder, |
381 | struct drm_display_mode *mode); | 384 | struct drm_display_mode *mode); |
385 | extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe); | ||
386 | |||
382 | #endif /* __INTEL_DRV_H__ */ | 387 | #endif /* __INTEL_DRV_H__ */ |
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 75026ba41a8e..d4f5a0b2120d 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c | |||
@@ -69,8 +69,7 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame) | |||
69 | frame->checksum = 0; | 69 | frame->checksum = 0; |
70 | frame->ecc = 0; | 70 | frame->ecc = 0; |
71 | 71 | ||
72 | /* Header isn't part of the checksum */ | 72 | for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++) |
73 | for (i = 5; i < frame->len; i++) | ||
74 | sum += data[i]; | 73 | sum += data[i]; |
75 | 74 | ||
76 | frame->checksum = 0x100 - sum; | 75 | frame->checksum = 0x100 - sum; |
@@ -104,7 +103,7 @@ static u32 intel_infoframe_flags(struct dip_infoframe *frame) | |||
104 | flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC; | 103 | flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC; |
105 | break; | 104 | break; |
106 | case DIP_TYPE_SPD: | 105 | case DIP_TYPE_SPD: |
107 | flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC; | 106 | flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC; |
108 | break; | 107 | break; |
109 | default: | 108 | default: |
110 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); | 109 | DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type); |
@@ -165,9 +164,9 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder, | |||
165 | 164 | ||
166 | flags = intel_infoframe_index(frame); | 165 | flags = intel_infoframe_index(frame); |
167 | 166 | ||
168 | val &= ~VIDEO_DIP_SELECT_MASK; | 167 | val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */ |
169 | 168 | ||
170 | I915_WRITE(reg, val | flags); | 169 | I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags); |
171 | 170 | ||
172 | for (i = 0; i < len; i += 4) { | 171 | for (i = 0; i < len; i += 4) { |
173 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); | 172 | I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data); |
@@ -252,12 +251,10 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder, | |||
252 | intel_write_eld(encoder, adjusted_mode); | 251 | intel_write_eld(encoder, adjusted_mode); |
253 | } | 252 | } |
254 | 253 | ||
255 | if (intel_crtc->pipe == 1) { | 254 | if (HAS_PCH_CPT(dev)) |
256 | if (HAS_PCH_CPT(dev)) | 255 | sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe); |
257 | sdvox |= PORT_TRANS_B_SEL_CPT; | 256 | else if (intel_crtc->pipe == 1) |
258 | else | 257 | sdvox |= SDVO_PIPE_B_SELECT; |
259 | sdvox |= SDVO_PIPE_B_SELECT; | ||
260 | } | ||
261 | 258 | ||
262 | I915_WRITE(intel_hdmi->sdvox_reg, sdvox); | 259 | I915_WRITE(intel_hdmi->sdvox_reg, sdvox); |
263 | POSTING_READ(intel_hdmi->sdvox_reg); | 260 | POSTING_READ(intel_hdmi->sdvox_reg); |
@@ -489,6 +486,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
489 | struct intel_encoder *intel_encoder; | 486 | struct intel_encoder *intel_encoder; |
490 | struct intel_connector *intel_connector; | 487 | struct intel_connector *intel_connector; |
491 | struct intel_hdmi *intel_hdmi; | 488 | struct intel_hdmi *intel_hdmi; |
489 | int i; | ||
492 | 490 | ||
493 | intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); | 491 | intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL); |
494 | if (!intel_hdmi) | 492 | if (!intel_hdmi) |
@@ -514,7 +512,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
514 | connector->polled = DRM_CONNECTOR_POLL_HPD; | 512 | connector->polled = DRM_CONNECTOR_POLL_HPD; |
515 | connector->interlace_allowed = 0; | 513 | connector->interlace_allowed = 0; |
516 | connector->doublescan_allowed = 0; | 514 | connector->doublescan_allowed = 0; |
517 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 515 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
518 | 516 | ||
519 | /* Set up the DDC bus. */ | 517 | /* Set up the DDC bus. */ |
520 | if (sdvox_reg == SDVOB) { | 518 | if (sdvox_reg == SDVOB) { |
@@ -541,10 +539,14 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg) | |||
541 | 539 | ||
542 | intel_hdmi->sdvox_reg = sdvox_reg; | 540 | intel_hdmi->sdvox_reg = sdvox_reg; |
543 | 541 | ||
544 | if (!HAS_PCH_SPLIT(dev)) | 542 | if (!HAS_PCH_SPLIT(dev)) { |
545 | intel_hdmi->write_infoframe = i9xx_write_infoframe; | 543 | intel_hdmi->write_infoframe = i9xx_write_infoframe; |
546 | else | 544 | I915_WRITE(VIDEO_DIP_CTL, 0); |
545 | } else { | ||
547 | intel_hdmi->write_infoframe = ironlake_write_infoframe; | 546 | intel_hdmi->write_infoframe = ironlake_write_infoframe; |
547 | for_each_pipe(i) | ||
548 | I915_WRITE(TVIDEO_DIP_CTL(i), 0); | ||
549 | } | ||
548 | 550 | ||
549 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); | 551 | drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs); |
550 | 552 | ||
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index d98cee60b602..9ed5380e5a53 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -422,13 +422,7 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) | |||
422 | { | 422 | { |
423 | struct intel_gmbus *bus = to_intel_gmbus(adapter); | 423 | struct intel_gmbus *bus = to_intel_gmbus(adapter); |
424 | 424 | ||
425 | /* speed: | 425 | bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed; |
426 | * 0x0 = 100 KHz | ||
427 | * 0x1 = 50 KHz | ||
428 | * 0x2 = 400 KHz | ||
429 | * 0x3 = 1000 Khz | ||
430 | */ | ||
431 | bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8); | ||
432 | } | 426 | } |
433 | 427 | ||
434 | void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) | 428 | void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 31da77f5c051..42f165a520de 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -888,9 +888,11 @@ bool intel_lvds_init(struct drm_device *dev) | |||
888 | intel_encoder->type = INTEL_OUTPUT_LVDS; | 888 | intel_encoder->type = INTEL_OUTPUT_LVDS; |
889 | 889 | ||
890 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); | 890 | intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT); |
891 | intel_encoder->crtc_mask = (1 << 1); | 891 | if (HAS_PCH_SPLIT(dev)) |
892 | if (INTEL_INFO(dev)->gen >= 5) | 892 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
893 | intel_encoder->crtc_mask |= (1 << 0); | 893 | else |
894 | intel_encoder->crtc_mask = (1 << 1); | ||
895 | |||
894 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); | 896 | drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs); |
895 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); | 897 | drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs); |
896 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; | 898 | connector->display_info.subpixel_order = SubPixelHorizontalRGB; |
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 03500e94a73f..499d4c0dbeeb 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c | |||
@@ -226,7 +226,7 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level) | |||
226 | I915_WRITE(BLC_PWM_CPU_CTL, val | level); | 226 | I915_WRITE(BLC_PWM_CPU_CTL, val | level); |
227 | } | 227 | } |
228 | 228 | ||
229 | void intel_panel_set_backlight(struct drm_device *dev, u32 level) | 229 | static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level) |
230 | { | 230 | { |
231 | struct drm_i915_private *dev_priv = dev->dev_private; | 231 | struct drm_i915_private *dev_priv = dev->dev_private; |
232 | u32 tmp; | 232 | u32 tmp; |
@@ -254,16 +254,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level) | |||
254 | I915_WRITE(BLC_PWM_CTL, tmp | level); | 254 | I915_WRITE(BLC_PWM_CTL, tmp | level); |
255 | } | 255 | } |
256 | 256 | ||
257 | void intel_panel_disable_backlight(struct drm_device *dev) | 257 | void intel_panel_set_backlight(struct drm_device *dev, u32 level) |
258 | { | 258 | { |
259 | struct drm_i915_private *dev_priv = dev->dev_private; | 259 | struct drm_i915_private *dev_priv = dev->dev_private; |
260 | 260 | ||
261 | if (dev_priv->backlight_enabled) { | 261 | dev_priv->backlight_level = level; |
262 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | 262 | if (dev_priv->backlight_enabled) |
263 | dev_priv->backlight_enabled = false; | 263 | intel_panel_actually_set_backlight(dev, level); |
264 | } | 264 | } |
265 | |||
266 | void intel_panel_disable_backlight(struct drm_device *dev) | ||
267 | { | ||
268 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
265 | 269 | ||
266 | intel_panel_set_backlight(dev, 0); | 270 | dev_priv->backlight_enabled = false; |
271 | intel_panel_actually_set_backlight(dev, 0); | ||
267 | } | 272 | } |
268 | 273 | ||
269 | void intel_panel_enable_backlight(struct drm_device *dev) | 274 | void intel_panel_enable_backlight(struct drm_device *dev) |
@@ -273,8 +278,8 @@ void intel_panel_enable_backlight(struct drm_device *dev) | |||
273 | if (dev_priv->backlight_level == 0) | 278 | if (dev_priv->backlight_level == 0) |
274 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); | 279 | dev_priv->backlight_level = intel_panel_get_max_backlight(dev); |
275 | 280 | ||
276 | intel_panel_set_backlight(dev, dev_priv->backlight_level); | ||
277 | dev_priv->backlight_enabled = true; | 281 | dev_priv->backlight_enabled = true; |
282 | intel_panel_actually_set_backlight(dev, dev_priv->backlight_level); | ||
278 | } | 283 | } |
279 | 284 | ||
280 | static void intel_panel_init_backlight(struct drm_device *dev) | 285 | static void intel_panel_init_backlight(struct drm_device *dev) |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index 0e99589b54e0..ca70e2f10445 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -34,6 +34,16 @@ | |||
34 | #include "i915_trace.h" | 34 | #include "i915_trace.h" |
35 | #include "intel_drv.h" | 35 | #include "intel_drv.h" |
36 | 36 | ||
37 | /* | ||
38 | * 965+ support PIPE_CONTROL commands, which provide finer grained control | ||
39 | * over cache flushing. | ||
40 | */ | ||
41 | struct pipe_control { | ||
42 | struct drm_i915_gem_object *obj; | ||
43 | volatile u32 *cpu_page; | ||
44 | u32 gtt_offset; | ||
45 | }; | ||
46 | |||
37 | static inline int ring_space(struct intel_ring_buffer *ring) | 47 | static inline int ring_space(struct intel_ring_buffer *ring) |
38 | { | 48 | { |
39 | int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); | 49 | int space = (ring->head & HEAD_ADDR) - (ring->tail + 8); |
@@ -123,6 +133,118 @@ render_ring_flush(struct intel_ring_buffer *ring, | |||
123 | return 0; | 133 | return 0; |
124 | } | 134 | } |
125 | 135 | ||
136 | /** | ||
137 | * Emits a PIPE_CONTROL with a non-zero post-sync operation, for | ||
138 | * implementing two workarounds on gen6. From section 1.4.7.1 | ||
139 | * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1: | ||
140 | * | ||
141 | * [DevSNB-C+{W/A}] Before any depth stall flush (including those | ||
142 | * produced by non-pipelined state commands), software needs to first | ||
143 | * send a PIPE_CONTROL with no bits set except Post-Sync Operation != | ||
144 | * 0. | ||
145 | * | ||
146 | * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable | ||
147 | * =1, a PIPE_CONTROL with any non-zero post-sync-op is required. | ||
148 | * | ||
149 | * And the workaround for these two requires this workaround first: | ||
150 | * | ||
151 | * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent | ||
152 | * BEFORE the pipe-control with a post-sync op and no write-cache | ||
153 | * flushes. | ||
154 | * | ||
155 | * And this last workaround is tricky because of the requirements on | ||
156 | * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM | ||
157 | * volume 2 part 1: | ||
158 | * | ||
159 | * "1 of the following must also be set: | ||
160 | * - Render Target Cache Flush Enable ([12] of DW1) | ||
161 | * - Depth Cache Flush Enable ([0] of DW1) | ||
162 | * - Stall at Pixel Scoreboard ([1] of DW1) | ||
163 | * - Depth Stall ([13] of DW1) | ||
164 | * - Post-Sync Operation ([13] of DW1) | ||
165 | * - Notify Enable ([8] of DW1)" | ||
166 | * | ||
167 | * The cache flushes require the workaround flush that triggered this | ||
168 | * one, so we can't use it. Depth stall would trigger the same. | ||
169 | * Post-sync nonzero is what triggered this second workaround, so we | ||
170 | * can't use that one either. Notify enable is IRQs, which aren't | ||
171 | * really our business. That leaves only stall at scoreboard. | ||
172 | */ | ||
173 | static int | ||
174 | intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring) | ||
175 | { | ||
176 | struct pipe_control *pc = ring->private; | ||
177 | u32 scratch_addr = pc->gtt_offset + 128; | ||
178 | int ret; | ||
179 | |||
180 | |||
181 | ret = intel_ring_begin(ring, 6); | ||
182 | if (ret) | ||
183 | return ret; | ||
184 | |||
185 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); | ||
186 | intel_ring_emit(ring, PIPE_CONTROL_CS_STALL | | ||
187 | PIPE_CONTROL_STALL_AT_SCOREBOARD); | ||
188 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ | ||
189 | intel_ring_emit(ring, 0); /* low dword */ | ||
190 | intel_ring_emit(ring, 0); /* high dword */ | ||
191 | intel_ring_emit(ring, MI_NOOP); | ||
192 | intel_ring_advance(ring); | ||
193 | |||
194 | ret = intel_ring_begin(ring, 6); | ||
195 | if (ret) | ||
196 | return ret; | ||
197 | |||
198 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); | ||
199 | intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE); | ||
200 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */ | ||
201 | intel_ring_emit(ring, 0); | ||
202 | intel_ring_emit(ring, 0); | ||
203 | intel_ring_emit(ring, MI_NOOP); | ||
204 | intel_ring_advance(ring); | ||
205 | |||
206 | return 0; | ||
207 | } | ||
208 | |||
209 | static int | ||
210 | gen6_render_ring_flush(struct intel_ring_buffer *ring, | ||
211 | u32 invalidate_domains, u32 flush_domains) | ||
212 | { | ||
213 | u32 flags = 0; | ||
214 | struct pipe_control *pc = ring->private; | ||
215 | u32 scratch_addr = pc->gtt_offset + 128; | ||
216 | int ret; | ||
217 | |||
218 | /* Force SNB workarounds for PIPE_CONTROL flushes */ | ||
219 | intel_emit_post_sync_nonzero_flush(ring); | ||
220 | |||
221 | /* Just flush everything. Experiments have shown that reducing the | ||
222 | * number of bits based on the write domains has little performance | ||
223 | * impact. | ||
224 | */ | ||
225 | flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH; | ||
226 | flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE; | ||
227 | flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE; | ||
228 | flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH; | ||
229 | flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE; | ||
230 | flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE; | ||
231 | flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE; | ||
232 | |||
233 | ret = intel_ring_begin(ring, 6); | ||
234 | if (ret) | ||
235 | return ret; | ||
236 | |||
237 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5)); | ||
238 | intel_ring_emit(ring, flags); | ||
239 | intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); | ||
240 | intel_ring_emit(ring, 0); /* lower dword */ | ||
241 | intel_ring_emit(ring, 0); /* uppwer dword */ | ||
242 | intel_ring_emit(ring, MI_NOOP); | ||
243 | intel_ring_advance(ring); | ||
244 | |||
245 | return 0; | ||
246 | } | ||
247 | |||
126 | static void ring_write_tail(struct intel_ring_buffer *ring, | 248 | static void ring_write_tail(struct intel_ring_buffer *ring, |
127 | u32 value) | 249 | u32 value) |
128 | { | 250 | { |
@@ -206,16 +328,6 @@ static int init_ring_common(struct intel_ring_buffer *ring) | |||
206 | return 0; | 328 | return 0; |
207 | } | 329 | } |
208 | 330 | ||
209 | /* | ||
210 | * 965+ support PIPE_CONTROL commands, which provide finer grained control | ||
211 | * over cache flushing. | ||
212 | */ | ||
213 | struct pipe_control { | ||
214 | struct drm_i915_gem_object *obj; | ||
215 | volatile u32 *cpu_page; | ||
216 | u32 gtt_offset; | ||
217 | }; | ||
218 | |||
219 | static int | 331 | static int |
220 | init_pipe_control(struct intel_ring_buffer *ring) | 332 | init_pipe_control(struct intel_ring_buffer *ring) |
221 | { | 333 | { |
@@ -296,8 +408,7 @@ static int init_render_ring(struct intel_ring_buffer *ring) | |||
296 | GFX_MODE_ENABLE(GFX_REPLAY_MODE)); | 408 | GFX_MODE_ENABLE(GFX_REPLAY_MODE)); |
297 | } | 409 | } |
298 | 410 | ||
299 | if (INTEL_INFO(dev)->gen >= 6) { | 411 | if (INTEL_INFO(dev)->gen >= 5) { |
300 | } else if (IS_GEN5(dev)) { | ||
301 | ret = init_pipe_control(ring); | 412 | ret = init_pipe_control(ring); |
302 | if (ret) | 413 | if (ret) |
303 | return ret; | 414 | return ret; |
@@ -438,8 +549,8 @@ gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter, | |||
438 | 549 | ||
439 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ | 550 | #define PIPE_CONTROL_FLUSH(ring__, addr__) \ |
440 | do { \ | 551 | do { \ |
441 | intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \ | 552 | intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \ |
442 | PIPE_CONTROL_DEPTH_STALL | 2); \ | 553 | PIPE_CONTROL_DEPTH_STALL); \ |
443 | intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ | 554 | intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \ |
444 | intel_ring_emit(ring__, 0); \ | 555 | intel_ring_emit(ring__, 0); \ |
445 | intel_ring_emit(ring__, 0); \ | 556 | intel_ring_emit(ring__, 0); \ |
@@ -467,8 +578,9 @@ pc_render_add_request(struct intel_ring_buffer *ring, | |||
467 | if (ret) | 578 | if (ret) |
468 | return ret; | 579 | return ret; |
469 | 580 | ||
470 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | 581 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | |
471 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH); | 582 | PIPE_CONTROL_WRITE_FLUSH | |
583 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE); | ||
472 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 584 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
473 | intel_ring_emit(ring, seqno); | 585 | intel_ring_emit(ring, seqno); |
474 | intel_ring_emit(ring, 0); | 586 | intel_ring_emit(ring, 0); |
@@ -483,8 +595,9 @@ pc_render_add_request(struct intel_ring_buffer *ring, | |||
483 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 595 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
484 | scratch_addr += 128; | 596 | scratch_addr += 128; |
485 | PIPE_CONTROL_FLUSH(ring, scratch_addr); | 597 | PIPE_CONTROL_FLUSH(ring, scratch_addr); |
486 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | | 598 | intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | |
487 | PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH | | 599 | PIPE_CONTROL_WRITE_FLUSH | |
600 | PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | | ||
488 | PIPE_CONTROL_NOTIFY); | 601 | PIPE_CONTROL_NOTIFY); |
489 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); | 602 | intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT); |
490 | intel_ring_emit(ring, seqno); | 603 | intel_ring_emit(ring, seqno); |
@@ -1358,6 +1471,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev) | |||
1358 | *ring = render_ring; | 1471 | *ring = render_ring; |
1359 | if (INTEL_INFO(dev)->gen >= 6) { | 1472 | if (INTEL_INFO(dev)->gen >= 6) { |
1360 | ring->add_request = gen6_add_request; | 1473 | ring->add_request = gen6_add_request; |
1474 | ring->flush = gen6_render_ring_flush; | ||
1361 | ring->irq_get = gen6_render_ring_get_irq; | 1475 | ring->irq_get = gen6_render_ring_get_irq; |
1362 | ring->irq_put = gen6_render_ring_put_irq; | 1476 | ring->irq_put = gen6_render_ring_put_irq; |
1363 | } else if (IS_GEN5(dev)) { | 1477 | } else if (IS_GEN5(dev)) { |
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index 731200243219..6db3b1ccb6eb 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -1232,8 +1232,7 @@ static bool | |||
1232 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) | 1232 | intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo) |
1233 | { | 1233 | { |
1234 | /* Is there more than one type of output? */ | 1234 | /* Is there more than one type of output? */ |
1235 | int caps = intel_sdvo->caps.output_flags & 0xf; | 1235 | return hweight16(intel_sdvo->caps.output_flags) > 1; |
1236 | return caps & -caps; | ||
1237 | } | 1236 | } |
1238 | 1237 | ||
1239 | static struct edid * | 1238 | static struct edid * |
@@ -1254,7 +1253,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector) | |||
1254 | } | 1253 | } |
1255 | 1254 | ||
1256 | enum drm_connector_status | 1255 | enum drm_connector_status |
1257 | intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | 1256 | intel_sdvo_tmds_sink_detect(struct drm_connector *connector) |
1258 | { | 1257 | { |
1259 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); | 1258 | struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector); |
1260 | enum drm_connector_status status; | 1259 | enum drm_connector_status status; |
@@ -1349,7 +1348,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1349 | if ((intel_sdvo_connector->output_flag & response) == 0) | 1348 | if ((intel_sdvo_connector->output_flag & response) == 0) |
1350 | ret = connector_status_disconnected; | 1349 | ret = connector_status_disconnected; |
1351 | else if (IS_TMDS(intel_sdvo_connector)) | 1350 | else if (IS_TMDS(intel_sdvo_connector)) |
1352 | ret = intel_sdvo_hdmi_sink_detect(connector); | 1351 | ret = intel_sdvo_tmds_sink_detect(connector); |
1353 | else { | 1352 | else { |
1354 | struct edid *edid; | 1353 | struct edid *edid; |
1355 | 1354 | ||
@@ -1896,7 +1895,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, | |||
1896 | struct intel_sdvo *sdvo, u32 reg) | 1895 | struct intel_sdvo *sdvo, u32 reg) |
1897 | { | 1896 | { |
1898 | struct sdvo_device_mapping *mapping; | 1897 | struct sdvo_device_mapping *mapping; |
1899 | u8 pin, speed; | 1898 | u8 pin; |
1900 | 1899 | ||
1901 | if (IS_SDVOB(reg)) | 1900 | if (IS_SDVOB(reg)) |
1902 | mapping = &dev_priv->sdvo_mappings[0]; | 1901 | mapping = &dev_priv->sdvo_mappings[0]; |
@@ -1904,18 +1903,16 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, | |||
1904 | mapping = &dev_priv->sdvo_mappings[1]; | 1903 | mapping = &dev_priv->sdvo_mappings[1]; |
1905 | 1904 | ||
1906 | pin = GMBUS_PORT_DPB; | 1905 | pin = GMBUS_PORT_DPB; |
1907 | speed = GMBUS_RATE_1MHZ >> 8; | 1906 | if (mapping->initialized) |
1908 | if (mapping->initialized) { | ||
1909 | pin = mapping->i2c_pin; | 1907 | pin = mapping->i2c_pin; |
1910 | speed = mapping->i2c_speed; | ||
1911 | } | ||
1912 | 1908 | ||
1913 | if (pin < GMBUS_NUM_PORTS) { | 1909 | if (pin < GMBUS_NUM_PORTS) { |
1914 | sdvo->i2c = &dev_priv->gmbus[pin].adapter; | 1910 | sdvo->i2c = &dev_priv->gmbus[pin].adapter; |
1915 | intel_gmbus_set_speed(sdvo->i2c, speed); | 1911 | intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ); |
1916 | intel_gmbus_force_bit(sdvo->i2c, true); | 1912 | intel_gmbus_force_bit(sdvo->i2c, true); |
1917 | } else | 1913 | } else { |
1918 | sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; | 1914 | sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; |
1915 | } | ||
1919 | } | 1916 | } |
1920 | 1917 | ||
1921 | static bool | 1918 | static bool |
@@ -2206,7 +2203,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) | |||
2206 | bytes[0], bytes[1]); | 2203 | bytes[0], bytes[1]); |
2207 | return false; | 2204 | return false; |
2208 | } | 2205 | } |
2209 | intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1); | 2206 | intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); |
2210 | 2207 | ||
2211 | return true; | 2208 | return true; |
2212 | } | 2209 | } |
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index c621c98c99da..1e184c12012a 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c | |||
@@ -404,6 +404,9 @@ static int dmar_forcedac; | |||
404 | static int intel_iommu_strict; | 404 | static int intel_iommu_strict; |
405 | static int intel_iommu_superpage = 1; | 405 | static int intel_iommu_superpage = 1; |
406 | 406 | ||
407 | int intel_iommu_gfx_mapped; | ||
408 | EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped); | ||
409 | |||
407 | #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) | 410 | #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1)) |
408 | static DEFINE_SPINLOCK(device_domain_lock); | 411 | static DEFINE_SPINLOCK(device_domain_lock); |
409 | static LIST_HEAD(device_domain_list); | 412 | static LIST_HEAD(device_domain_list); |
@@ -3226,9 +3229,6 @@ static void __init init_no_remapping_devices(void) | |||
3226 | } | 3229 | } |
3227 | } | 3230 | } |
3228 | 3231 | ||
3229 | if (dmar_map_gfx) | ||
3230 | return; | ||
3231 | |||
3232 | for_each_drhd_unit(drhd) { | 3232 | for_each_drhd_unit(drhd) { |
3233 | int i; | 3233 | int i; |
3234 | if (drhd->ignored || drhd->include_all) | 3234 | if (drhd->ignored || drhd->include_all) |
@@ -3236,18 +3236,23 @@ static void __init init_no_remapping_devices(void) | |||
3236 | 3236 | ||
3237 | for (i = 0; i < drhd->devices_cnt; i++) | 3237 | for (i = 0; i < drhd->devices_cnt; i++) |
3238 | if (drhd->devices[i] && | 3238 | if (drhd->devices[i] && |
3239 | !IS_GFX_DEVICE(drhd->devices[i])) | 3239 | !IS_GFX_DEVICE(drhd->devices[i])) |
3240 | break; | 3240 | break; |
3241 | 3241 | ||
3242 | if (i < drhd->devices_cnt) | 3242 | if (i < drhd->devices_cnt) |
3243 | continue; | 3243 | continue; |
3244 | 3244 | ||
3245 | /* bypass IOMMU if it is just for gfx devices */ | 3245 | /* This IOMMU has *only* gfx devices. Either bypass it or |
3246 | drhd->ignored = 1; | 3246 | set the gfx_mapped flag, as appropriate */ |
3247 | for (i = 0; i < drhd->devices_cnt; i++) { | 3247 | if (dmar_map_gfx) { |
3248 | if (!drhd->devices[i]) | 3248 | intel_iommu_gfx_mapped = 1; |
3249 | continue; | 3249 | } else { |
3250 | drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; | 3250 | drhd->ignored = 1; |
3251 | for (i = 0; i < drhd->devices_cnt; i++) { | ||
3252 | if (!drhd->devices[i]) | ||
3253 | continue; | ||
3254 | drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO; | ||
3255 | } | ||
3251 | } | 3256 | } |
3252 | } | 3257 | } |
3253 | } | 3258 | } |
@@ -3950,7 +3955,11 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev) | |||
3950 | if (!(ggc & GGC_MEMORY_VT_ENABLED)) { | 3955 | if (!(ggc & GGC_MEMORY_VT_ENABLED)) { |
3951 | printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); | 3956 | printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n"); |
3952 | dmar_map_gfx = 0; | 3957 | dmar_map_gfx = 0; |
3953 | } | 3958 | } else if (dmar_map_gfx) { |
3959 | /* we have to ensure the gfx device is idle before we flush */ | ||
3960 | printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n"); | ||
3961 | intel_iommu_strict = 1; | ||
3962 | } | ||
3954 | } | 3963 | } |
3955 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); | 3964 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt); |
3956 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); | 3965 | DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0044, quirk_calpella_no_shadow_gtt); |
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h index 91567bbdb027..0d2f727e96be 100644 --- a/include/drm/drm_dp_helper.h +++ b/include/drm/drm_dp_helper.h | |||
@@ -74,6 +74,20 @@ | |||
74 | 74 | ||
75 | #define DP_TRAINING_AUX_RD_INTERVAL 0x00e | 75 | #define DP_TRAINING_AUX_RD_INTERVAL 0x00e |
76 | 76 | ||
77 | #define DP_PSR_SUPPORT 0x070 | ||
78 | # define DP_PSR_IS_SUPPORTED 1 | ||
79 | #define DP_PSR_CAPS 0x071 | ||
80 | # define DP_PSR_NO_TRAIN_ON_EXIT 1 | ||
81 | # define DP_PSR_SETUP_TIME_330 (0 << 1) | ||
82 | # define DP_PSR_SETUP_TIME_275 (1 << 1) | ||
83 | # define DP_PSR_SETUP_TIME_220 (2 << 1) | ||
84 | # define DP_PSR_SETUP_TIME_165 (3 << 1) | ||
85 | # define DP_PSR_SETUP_TIME_110 (4 << 1) | ||
86 | # define DP_PSR_SETUP_TIME_55 (5 << 1) | ||
87 | # define DP_PSR_SETUP_TIME_0 (6 << 1) | ||
88 | # define DP_PSR_SETUP_TIME_MASK (7 << 1) | ||
89 | # define DP_PSR_SETUP_TIME_SHIFT 1 | ||
90 | |||
77 | /* link configuration */ | 91 | /* link configuration */ |
78 | #define DP_LINK_BW_SET 0x100 | 92 | #define DP_LINK_BW_SET 0x100 |
79 | # define DP_LINK_BW_1_62 0x06 | 93 | # define DP_LINK_BW_1_62 0x06 |
@@ -133,6 +147,18 @@ | |||
133 | #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 | 147 | #define DP_MAIN_LINK_CHANNEL_CODING_SET 0x108 |
134 | # define DP_SET_ANSI_8B10B (1 << 0) | 148 | # define DP_SET_ANSI_8B10B (1 << 0) |
135 | 149 | ||
150 | #define DP_PSR_EN_CFG 0x170 | ||
151 | # define DP_PSR_ENABLE (1 << 0) | ||
152 | # define DP_PSR_MAIN_LINK_ACTIVE (1 << 1) | ||
153 | # define DP_PSR_CRC_VERIFICATION (1 << 2) | ||
154 | # define DP_PSR_FRAME_CAPTURE (1 << 3) | ||
155 | |||
156 | #define DP_DEVICE_SERVICE_IRQ_VECTOR 0x201 | ||
157 | # define DP_REMOTE_CONTROL_COMMAND_PENDING (1 << 0) | ||
158 | # define DP_AUTOMATED_TEST_REQUEST (1 << 1) | ||
159 | # define DP_CP_IRQ (1 << 2) | ||
160 | # define DP_SINK_SPECIFIC_IRQ (1 << 6) | ||
161 | |||
136 | #define DP_LANE0_1_STATUS 0x202 | 162 | #define DP_LANE0_1_STATUS 0x202 |
137 | #define DP_LANE2_3_STATUS 0x203 | 163 | #define DP_LANE2_3_STATUS 0x203 |
138 | # define DP_LANE_CR_DONE (1 << 0) | 164 | # define DP_LANE_CR_DONE (1 << 0) |
@@ -165,10 +191,45 @@ | |||
165 | # define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 | 191 | # define DP_ADJUST_PRE_EMPHASIS_LANE1_MASK 0xc0 |
166 | # define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 | 192 | # define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6 |
167 | 193 | ||
194 | #define DP_TEST_REQUEST 0x218 | ||
195 | # define DP_TEST_LINK_TRAINING (1 << 0) | ||
196 | # define DP_TEST_LINK_PATTERN (1 << 1) | ||
197 | # define DP_TEST_LINK_EDID_READ (1 << 2) | ||
198 | # define DP_TEST_LINK_PHY_TEST_PATTERN (1 << 3) /* DPCD >= 1.1 */ | ||
199 | |||
200 | #define DP_TEST_LINK_RATE 0x219 | ||
201 | # define DP_LINK_RATE_162 (0x6) | ||
202 | # define DP_LINK_RATE_27 (0xa) | ||
203 | |||
204 | #define DP_TEST_LANE_COUNT 0x220 | ||
205 | |||
206 | #define DP_TEST_PATTERN 0x221 | ||
207 | |||
208 | #define DP_TEST_RESPONSE 0x260 | ||
209 | # define DP_TEST_ACK (1 << 0) | ||
210 | # define DP_TEST_NAK (1 << 1) | ||
211 | # define DP_TEST_EDID_CHECKSUM_WRITE (1 << 2) | ||
212 | |||
168 | #define DP_SET_POWER 0x600 | 213 | #define DP_SET_POWER 0x600 |
169 | # define DP_SET_POWER_D0 0x1 | 214 | # define DP_SET_POWER_D0 0x1 |
170 | # define DP_SET_POWER_D3 0x2 | 215 | # define DP_SET_POWER_D3 0x2 |
171 | 216 | ||
217 | #define DP_PSR_ERROR_STATUS 0x2006 | ||
218 | # define DP_PSR_LINK_CRC_ERROR (1 << 0) | ||
219 | # define DP_PSR_RFB_STORAGE_ERROR (1 << 1) | ||
220 | |||
221 | #define DP_PSR_ESI 0x2007 | ||
222 | # define DP_PSR_CAPS_CHANGE (1 << 0) | ||
223 | |||
224 | #define DP_PSR_STATUS 0x2008 | ||
225 | # define DP_PSR_SINK_INACTIVE 0 | ||
226 | # define DP_PSR_SINK_ACTIVE_SRC_SYNCED 1 | ||
227 | # define DP_PSR_SINK_ACTIVE_RFB 2 | ||
228 | # define DP_PSR_SINK_ACTIVE_SINK_SYNCED 3 | ||
229 | # define DP_PSR_SINK_ACTIVE_RESYNC 4 | ||
230 | # define DP_PSR_SINK_INTERNAL_ERROR 7 | ||
231 | # define DP_PSR_SINK_STATE_MASK 0x07 | ||
232 | |||
172 | #define MODE_I2C_START 1 | 233 | #define MODE_I2C_START 1 |
173 | #define MODE_I2C_WRITE 2 | 234 | #define MODE_I2C_WRITE 2 |
174 | #define MODE_I2C_READ 4 | 235 | #define MODE_I2C_READ 4 |
diff --git a/include/drm/intel-gtt.h b/include/drm/intel-gtt.h index 9e343c0998b4..b174620cc9b3 100644 --- a/include/drm/intel-gtt.h +++ b/include/drm/intel-gtt.h | |||
@@ -13,6 +13,8 @@ const struct intel_gtt { | |||
13 | unsigned int gtt_mappable_entries; | 13 | unsigned int gtt_mappable_entries; |
14 | /* Whether i915 needs to use the dmar apis or not. */ | 14 | /* Whether i915 needs to use the dmar apis or not. */ |
15 | unsigned int needs_dmar : 1; | 15 | unsigned int needs_dmar : 1; |
16 | /* Whether we idle the gpu before mapping/unmapping */ | ||
17 | unsigned int do_idle_maps : 1; | ||
16 | } *intel_gtt_get(void); | 18 | } *intel_gtt_get(void); |
17 | 19 | ||
18 | void intel_gtt_chipset_flush(void); | 20 | void intel_gtt_chipset_flush(void); |
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h index 8cdcc2a199ad..1feeb5263565 100644 --- a/include/linux/io-mapping.h +++ b/include/linux/io-mapping.h | |||
@@ -117,6 +117,8 @@ io_mapping_unmap(void __iomem *vaddr) | |||
117 | 117 | ||
118 | #else | 118 | #else |
119 | 119 | ||
120 | #include <linux/uaccess.h> | ||
121 | |||
120 | /* this struct isn't actually defined anywhere */ | 122 | /* this struct isn't actually defined anywhere */ |
121 | struct io_mapping; | 123 | struct io_mapping; |
122 | 124 | ||
@@ -138,12 +140,14 @@ static inline void __iomem * | |||
138 | io_mapping_map_atomic_wc(struct io_mapping *mapping, | 140 | io_mapping_map_atomic_wc(struct io_mapping *mapping, |
139 | unsigned long offset) | 141 | unsigned long offset) |
140 | { | 142 | { |
143 | pagefault_disable(); | ||
141 | return ((char __force __iomem *) mapping) + offset; | 144 | return ((char __force __iomem *) mapping) + offset; |
142 | } | 145 | } |
143 | 146 | ||
144 | static inline void | 147 | static inline void |
145 | io_mapping_unmap_atomic(void __iomem *vaddr) | 148 | io_mapping_unmap_atomic(void __iomem *vaddr) |
146 | { | 149 | { |
150 | pagefault_enable(); | ||
147 | } | 151 | } |
148 | 152 | ||
149 | /* Non-atomic map/unmap */ | 153 | /* Non-atomic map/unmap */ |