Diffstat (limited to 'drivers/gpu/drm/i915')
 drivers/gpu/drm/i915/i915_dma.c         |   3
 drivers/gpu/drm/i915/i915_drv.c         |   3
 drivers/gpu/drm/i915/i915_drv.h         |   2
 drivers/gpu/drm/i915/i915_gem.c         | 602
 drivers/gpu/drm/i915/i915_reg.h         |   1
 drivers/gpu/drm/i915/i915_suspend.c     |  40
 drivers/gpu/drm/i915/intel_acpi.c       |  34
 drivers/gpu/drm/i915/intel_bios.c       |   2
 drivers/gpu/drm/i915/intel_crt.c        | 159
 drivers/gpu/drm/i915/intel_display.c    |  95
 drivers/gpu/drm/i915/intel_dp.c         | 208
 drivers/gpu/drm/i915/intel_drv.h        |   2
 drivers/gpu/drm/i915/intel_i2c.c        |  11
 drivers/gpu/drm/i915/intel_lvds.c       | 117
 drivers/gpu/drm/i915/intel_ringbuffer.c |  49
 drivers/gpu/drm/i915/intel_ringbuffer.h |   5
 drivers/gpu/drm/i915/intel_sdvo.c       |  93
 17 files changed, 815 insertions(+), 611 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7a26f4dd21ae..e6800819bca8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -767,6 +767,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_HAS_BLT:
 		value = HAS_BLT(dev);
 		break;
+	case I915_PARAM_HAS_COHERENT_RINGS:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 				 param->param);
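The new I915_PARAM_HAS_COHERENT_RINGS flag above lets userspace discover that the kernel provides coherent ring access. A minimal userspace probe might look like the following sketch (an illustration only, assuming libdrm's drmIoctl() and an i915_drm.h new enough to define the parameter; a kernel without this patch rejects the unknown parameter, which the sketch treats as "not supported"):

	#include <stdio.h>
	#include <fcntl.h>
	#include <xf86drm.h>	/* drmIoctl() */
	#include <i915_drm.h>	/* drm_i915_getparam_t */

	static int has_coherent_rings(int fd)
	{
		drm_i915_getparam_t gp;
		int value = 0;

		gp.param = I915_PARAM_HAS_COHERENT_RINGS;
		gp.value = &value;
		/* Old kernels return an error for unknown parameters. */
		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;
		return value;
	}

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);

		if (fd < 0)
			return 1;
		printf("coherent rings: %d\n", has_coherent_rings(fd));
		return 0;
	}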
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 80745f85902c..f737960712e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -150,7 +150,8 @@ static const struct intel_device_info intel_ironlake_d_info = {
 
 static const struct intel_device_info intel_ironlake_m_info = {
 	.gen = 5, .is_mobile = 1,
-	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.has_fbc = 0, /* disabled due to buggy hardware */
 	.has_bsd_ring = 1,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 90414ae86afc..409826da3099 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1045,6 +1045,8 @@ void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
 			       uint32_t read_domains,
 			       uint32_t write_domain);
+int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+			      bool interruptible);
 int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ef188e391406..275ec6ed43ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,8 +38,7 @@
 
 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
 
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
-						  bool pipelined);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -547,6 +546,19 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_object *obj_priv;
 	int ret = 0;
 
+	if (args->size == 0)
+		return 0;
+
+	if (!access_ok(VERIFY_WRITE,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size))
+		return -EFAULT;
+
+	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+				       args->size);
+	if (ret)
+		return -EFAULT;
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
 		return ret;
@@ -564,23 +576,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
 		goto out;
 	}
 
-	if (args->size == 0)
-		goto out;
-
-	if (!access_ok(VERIFY_WRITE,
-		       (char __user *)(uintptr_t)args->data_ptr,
-		       args->size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
-				       args->size);
-	if (ret) {
-		ret = -EFAULT;
-		goto out;
-	}
-
 	ret = i915_gem_object_get_pages_or_evict(obj);
 	if (ret)
 		goto out;
@@ -981,7 +976,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	struct drm_i915_gem_pwrite *args = data;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-	int ret = 0;
+	int ret;
+
+	if (args->size == 0)
+		return 0;
+
+	if (!access_ok(VERIFY_READ,
+		       (char __user *)(uintptr_t)args->data_ptr,
+		       args->size))
+		return -EFAULT;
+
+	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+				      args->size);
+	if (ret)
+		return -EFAULT;
 
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
@@ -994,30 +1002,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
 	}
 	obj_priv = to_intel_bo(obj);
 
-
 	/* Bounds check destination. */
 	if (args->offset > obj->size || args->size > obj->size - args->offset) {
 		ret = -EINVAL;
 		goto out;
 	}
 
-	if (args->size == 0)
-		goto out;
-
-	if (!access_ok(VERIFY_READ,
-		       (char __user *)(uintptr_t)args->data_ptr,
-		       args->size)) {
-		ret = -EFAULT;
-		goto out;
-	}
-
-	ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
-				      args->size);
-	if (ret) {
-		ret = -EFAULT;
-		goto out;
-	}
-
 	/* We can only do the GTT pwrite on untiled buffers, as otherwise
 	 * it would end up going through the fenced access, and we'll get
 	 * different detiling behavior between reading and writing.
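Both reorderings above follow the same rule: validate and pre-fault the user buffer before struct_mutex is taken, so a page fault during the later copy cannot recurse into the driver while the lock is held. Condensed, the new entry path reads roughly like this sketch (simplified names; object lookup, bounds checking and the copy itself are elided):

	static int pwrite_prologue(struct drm_device *dev,
				   struct drm_i915_gem_pwrite *args)
	{
		char __user *user_data =
			(char __user *)(uintptr_t)args->data_ptr;
		int ret;

		if (args->size == 0)
			return 0;

		/* Check and pre-fault while no locks are held... */
		if (!access_ok(VERIFY_READ, user_data, args->size))
			return -EFAULT;
		if (fault_in_pages_readable(user_data, args->size))
			return -EFAULT;

		/* ...and only then take the lock for the real work. */
		ret = i915_mutex_lock_interruptible(dev);
		if (ret)
			return ret;
		/* object lookup, bounds check, copy */
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}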
@@ -2603,7 +2593,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
 	if (reg->gpu) {
 		int ret;
 
-		ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+		ret = i915_gem_object_flush_gpu_write_domain(obj);
 		if (ret)
 			return ret;
 
@@ -2751,8 +2741,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
-				       bool pipelined)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
 	struct drm_device *dev = obj->dev;
 	uint32_t old_write_domain;
@@ -2771,10 +2760,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
 					    obj->read_domains,
 					    old_write_domain);
 
-	if (pipelined)
-		return 0;
-
-	return i915_gem_object_wait_rendering(obj, true);
+	return 0;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2835,18 +2821,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret != 0)
 		return ret;
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
+		return ret;
 
 	i915_gem_object_flush_cpu_write_domain(obj);
 
-	if (write) {
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
-
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
 
@@ -2884,7 +2867,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
 	if (obj_priv->gtt_space == NULL)
 		return -EINVAL;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret)
 		return ret;
 
@@ -2907,6 +2890,20 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
 	return 0;
 }
 
+int
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+			  bool interruptible)
+{
+	if (!obj->active)
+		return 0;
+
+	if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+		i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+				    0, obj->base.write_domain);
+
+	return i915_gem_object_wait_rendering(&obj->base, interruptible);
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
@@ -2919,9 +2916,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	uint32_t old_write_domain, old_read_domains;
 	int ret;
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret != 0)
 		return ret;
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
+		return ret;
 
 	i915_gem_object_flush_gtt_write_domain(obj);
 
@@ -2930,12 +2930,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
 	 */
 	i915_gem_object_set_to_full_cpu_read_domain(obj);
 
-	if (write) {
-		ret = i915_gem_object_wait_rendering(obj, true);
-		if (ret)
-			return ret;
-	}
-
 	old_write_domain = obj->write_domain;
 	old_read_domains = obj->read_domains;
 
@@ -3200,9 +3194,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	if (offset == 0 && size == obj->size)
 		return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-	ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+	ret = i915_gem_object_flush_gpu_write_domain(obj);
 	if (ret != 0)
 		return ret;
+	ret = i915_gem_object_wait_rendering(obj, true);
+	if (ret)
+		return ret;
+
 	i915_gem_object_flush_gtt_write_domain(obj);
 
 	/* If we're already fully in the CPU read domain, we're done. */
@@ -3249,192 +3247,230 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
 	return 0;
 }
 
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
 static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
-			     struct drm_file *file_priv,
-			     struct drm_i915_gem_exec_object2 *entry)
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+				   struct drm_file *file_priv,
+				   struct drm_i915_gem_exec_object2 *entry,
+				   struct drm_i915_gem_relocation_entry *reloc)
 {
 	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_relocation_entry __user *user_relocs;
-	struct drm_gem_object *target_obj = NULL;
-	uint32_t target_handle = 0;
-	int i, ret = 0;
+	struct drm_gem_object *target_obj;
+	uint32_t target_offset;
+	int ret = -EINVAL;
 
-	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-	for (i = 0; i < entry->relocation_count; i++) {
-		struct drm_i915_gem_relocation_entry reloc;
-		uint32_t target_offset;
+	target_obj = drm_gem_object_lookup(dev, file_priv,
+					   reloc->target_handle);
+	if (target_obj == NULL)
+		return -ENOENT;
 
-		if (__copy_from_user_inatomic(&reloc,
-					      user_relocs+i,
-					      sizeof(reloc))) {
-			ret = -EFAULT;
-			break;
-		}
+	target_offset = to_intel_bo(target_obj)->gtt_offset;
 
-		if (reloc.target_handle != target_handle) {
-			drm_gem_object_unreference(target_obj);
+#if WATCH_RELOC
+	DRM_INFO("%s: obj %p offset %08x target %d "
+		 "read %08x write %08x gtt %08x "
+		 "presumed %08x delta %08x\n",
+		 __func__,
+		 obj,
+		 (int) reloc->offset,
+		 (int) reloc->target_handle,
+		 (int) reloc->read_domains,
+		 (int) reloc->write_domain,
+		 (int) target_offset,
+		 (int) reloc->presumed_offset,
+		 reloc->delta);
+#endif
 
-			target_obj = drm_gem_object_lookup(dev, file_priv,
-							   reloc.target_handle);
-			if (target_obj == NULL) {
-				ret = -ENOENT;
-				break;
-			}
+	/* The target buffer should have appeared before us in the
+	 * exec_object list, so it should have a GTT space bound by now.
+	 */
+	if (target_offset == 0) {
+		DRM_ERROR("No GTT space found for object %d\n",
+			  reloc->target_handle);
+		goto err;
+	}
 
-			target_handle = reloc.target_handle;
-		}
-		target_offset = to_intel_bo(target_obj)->gtt_offset;
+	/* Validate that the target is in a valid r/w GPU domain */
+	if (reloc->write_domain & (reloc->write_domain - 1)) {
+		DRM_ERROR("reloc with multiple write domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		goto err;
+	}
+	if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+	    reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+		DRM_ERROR("reloc with read/write CPU domains: "
+			  "obj %p target %d offset %d "
+			  "read %08x write %08x",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->read_domains,
+			  reloc->write_domain);
+		goto err;
+	}
+	if (reloc->write_domain && target_obj->pending_write_domain &&
+	    reloc->write_domain != target_obj->pending_write_domain) {
+		DRM_ERROR("Write domain conflict: "
+			  "obj %p target %d offset %d "
+			  "new %08x old %08x\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  reloc->write_domain,
+			  target_obj->pending_write_domain);
+		goto err;
+	}
 
-#if WATCH_RELOC
-		DRM_INFO("%s: obj %p offset %08x target %d "
-			 "read %08x write %08x gtt %08x "
-			 "presumed %08x delta %08x\n",
-			 __func__,
-			 obj,
-			 (int) reloc.offset,
-			 (int) reloc.target_handle,
-			 (int) reloc.read_domains,
-			 (int) reloc.write_domain,
-			 (int) target_offset,
-			 (int) reloc.presumed_offset,
-			 reloc.delta);
-#endif
+	target_obj->pending_read_domains |= reloc->read_domains;
+	target_obj->pending_write_domain |= reloc->write_domain;
 
-		/* The target buffer should have appeared before us in the
-		 * exec_object list, so it should have a GTT space bound by now.
-		 */
-		if (target_offset == 0) {
-			DRM_ERROR("No GTT space found for object %d\n",
-				  reloc.target_handle);
-			ret = -EINVAL;
-			break;
-		}
+	/* If the relocation already has the right value in it, no
+	 * more work needs to be done.
+	 */
+	if (target_offset == reloc->presumed_offset)
+		goto out;
 
-		/* Validate that the target is in a valid r/w GPU domain */
-		if (reloc.write_domain & (reloc.write_domain - 1)) {
-			DRM_ERROR("reloc with multiple write domains: "
-				  "obj %p target %d offset %d "
-				  "read %08x write %08x",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.read_domains,
-				  reloc.write_domain);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-		    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
-			DRM_ERROR("reloc with read/write CPU domains: "
-				  "obj %p target %d offset %d "
-				  "read %08x write %08x",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.read_domains,
-				  reloc.write_domain);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.write_domain && target_obj->pending_write_domain &&
-		    reloc.write_domain != target_obj->pending_write_domain) {
-			DRM_ERROR("Write domain conflict: "
-				  "obj %p target %d offset %d "
-				  "new %08x old %08x\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset,
-				  reloc.write_domain,
-				  target_obj->pending_write_domain);
-			ret = -EINVAL;
-			break;
-		}
+	/* Check that the relocation address is valid... */
+	if (reloc->offset > obj->base.size - 4) {
+		DRM_ERROR("Relocation beyond object bounds: "
+			  "obj %p target %d offset %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset,
+			  (int) obj->base.size);
+		goto err;
+	}
+	if (reloc->offset & 3) {
+		DRM_ERROR("Relocation not 4-byte aligned: "
+			  "obj %p target %d offset %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->offset);
+		goto err;
+	}
 
-		target_obj->pending_read_domains |= reloc.read_domains;
-		target_obj->pending_write_domain |= reloc.write_domain;
+	/* and points to somewhere within the target object. */
+	if (reloc->delta >= target_obj->size) {
+		DRM_ERROR("Relocation beyond target object bounds: "
+			  "obj %p target %d delta %d size %d.\n",
+			  obj, reloc->target_handle,
+			  (int) reloc->delta,
+			  (int) target_obj->size);
+		goto err;
+	}
 
-		/* If the relocation already has the right value in it, no
-		 * more work needs to be done.
-		 */
-		if (target_offset == reloc.presumed_offset)
-			continue;
+	reloc->delta += target_offset;
+	if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+		uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+		char *vaddr;
 
-		/* Check that the relocation address is valid... */
-		if (reloc.offset > obj->base.size - 4) {
-			DRM_ERROR("Relocation beyond object bounds: "
-				  "obj %p target %d offset %d size %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset, (int) obj->base.size);
-			ret = -EINVAL;
-			break;
-		}
-		if (reloc.offset & 3) {
-			DRM_ERROR("Relocation not 4-byte aligned: "
-				  "obj %p target %d offset %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.offset);
-			ret = -EINVAL;
-			break;
-		}
+		vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+		*(uint32_t *)(vaddr + page_offset) = reloc->delta;
+		kunmap_atomic(vaddr);
+	} else {
+		struct drm_i915_private *dev_priv = dev->dev_private;
+		uint32_t __iomem *reloc_entry;
+		void __iomem *reloc_page;
 
-		/* and points to somewhere within the target object. */
-		if (reloc.delta >= target_obj->size) {
-			DRM_ERROR("Relocation beyond target object bounds: "
-				  "obj %p target %d delta %d size %d.\n",
-				  obj, reloc.target_handle,
-				  (int) reloc.delta, (int) target_obj->size);
-			ret = -EINVAL;
-			break;
-		}
+		ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+		if (ret)
+			goto err;
 
-		reloc.delta += target_offset;
-		if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-			uint32_t page_offset = reloc.offset & ~PAGE_MASK;
-			char *vaddr;
+		/* Map the page containing the relocation we're going to perform. */
+		reloc->offset += obj->gtt_offset;
+		reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+						      reloc->offset & PAGE_MASK);
+		reloc_entry = (uint32_t __iomem *)
+			(reloc_page + (reloc->offset & ~PAGE_MASK));
+		iowrite32(reloc->delta, reloc_entry);
+		io_mapping_unmap_atomic(reloc_page);
+	}
 
-			vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
-			*(uint32_t *)(vaddr + page_offset) = reloc.delta;
-			kunmap_atomic(vaddr);
-		} else {
-			uint32_t __iomem *reloc_entry;
-			void __iomem *reloc_page;
+	/* and update the user's relocation entry */
+	reloc->presumed_offset = target_offset;
 
-			ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
-			if (ret)
-				break;
+out:
+	ret = 0;
+err:
+	drm_gem_object_unreference(target_obj);
+	return ret;
+}
 
-			/* Map the page containing the relocation we're going to perform. */
-			reloc.offset += obj->gtt_offset;
-			reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-							      reloc.offset & PAGE_MASK);
-			reloc_entry = (uint32_t __iomem *)
-				(reloc_page + (reloc.offset & ~PAGE_MASK));
-			iowrite32(reloc.delta, reloc_entry);
-			io_mapping_unmap_atomic(reloc_page);
-		}
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+				    struct drm_file *file_priv,
+				    struct drm_i915_gem_exec_object2 *entry)
+{
+	struct drm_i915_gem_relocation_entry __user *user_relocs;
+	int i, ret;
+
+	user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_i915_gem_relocation_entry reloc;
+
+		if (__copy_from_user_inatomic(&reloc,
+					      user_relocs+i,
+					      sizeof(reloc)))
+			return -EFAULT;
+
+		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+		if (ret)
+			return ret;
 
-		/* and update the user's relocation entry */
-		reloc.presumed_offset = target_offset;
 		if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
 					    &reloc.presumed_offset,
-					    sizeof(reloc.presumed_offset))) {
-			ret = -EFAULT;
-			break;
-		}
+					    sizeof(reloc.presumed_offset)))
+			return -EFAULT;
 	}
 
-	drm_gem_object_unreference(target_obj);
-	return ret;
+	return 0;
 }
 
 static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
-			struct drm_file *file,
-			struct drm_gem_object **object_list,
-			struct drm_i915_gem_exec_object2 *exec_list,
-			int count)
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+					 struct drm_file *file_priv,
+					 struct drm_i915_gem_exec_object2 *entry,
+					 struct drm_i915_gem_relocation_entry *relocs)
+{
+	int i, ret;
+
+	for (i = 0; i < entry->relocation_count; i++) {
+		ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+			     struct drm_file *file,
+			     struct drm_gem_object **object_list,
+			     struct drm_i915_gem_exec_object2 *exec_list,
+			     int count)
+{
+	int i, ret;
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object(obj, file,
+							  &exec_list[i]);
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct drm_device *dev,
+			    struct drm_file *file,
+			    struct drm_gem_object **object_list,
+			    struct drm_i915_gem_exec_object2 *exec_list,
+			    int count)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret, i, retry;
@@ -3497,6 +3533,87 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
 }
 
 static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+				  struct drm_file *file,
+				  struct drm_gem_object **object_list,
+				  struct drm_i915_gem_exec_object2 *exec_list,
+				  int count)
+{
+	struct drm_i915_gem_relocation_entry *reloc;
+	int i, total, ret;
+
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->in_execbuffer = false;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	total = 0;
+	for (i = 0; i < count; i++)
+		total += exec_list[i].relocation_count;
+
+	reloc = drm_malloc_ab(total, sizeof(*reloc));
+	if (reloc == NULL) {
+		mutex_lock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+		user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+		if (copy_from_user(reloc+total, user_relocs,
+				   exec_list[i].relocation_count *
+				   sizeof(*reloc))) {
+			ret = -EFAULT;
+			mutex_lock(&dev->struct_mutex);
+			goto err;
+		}
+
+		total += exec_list[i].relocation_count;
+	}
+
+	ret = i915_mutex_lock_interruptible(dev);
+	if (ret) {
+		mutex_lock(&dev->struct_mutex);
+		goto err;
+	}
+
+	ret = i915_gem_execbuffer_reserve(dev, file,
+					  object_list, exec_list,
+					  count);
+	if (ret)
+		goto err;
+
+	total = 0;
+	for (i = 0; i < count; i++) {
+		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+		obj->base.pending_read_domains = 0;
+		obj->base.pending_write_domain = 0;
+		ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+							       &exec_list[i],
+							       reloc + total);
+		if (ret)
+			goto err;
+
+		total += exec_list[i].relocation_count;
+	}
+
+	/* Leave the user relocations as are, this is the painfully slow path,
+	 * and we want to avoid the complication of dropping the lock whilst
+	 * having buffers reserved in the aperture and so causing spurious
+	 * ENOSPC for random operations.
+	 */
+
+err:
+	drm_free_large(reloc);
+	return ret;
+}
+
+static int
 i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
 				struct drm_file *file,
 				struct intel_ring_buffer *ring,
@@ -3625,8 +3742,15 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
 	for (i = 0; i < count; i++) {
 		char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
-		size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
+		int length; /* limited by fault_in_pages_readable() */
+
+		/* First check for malicious input causing overflow */
+		if (exec[i].relocation_count >
+		    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+			return -EINVAL;
 
+		length = exec[i].relocation_count *
+			sizeof(struct drm_i915_gem_relocation_entry);
 		if (!access_ok(VERIFY_READ, ptr, length))
 			return -EFAULT;
 
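The new guard above exists because length is now an int and relocation_count is attacker-controlled: a huge count could wrap the product and slip a short length past the access_ok() bounds check. The fix is the standard overflow-safe multiply pattern, shown here as a generic helper (illustrative only, not part of the patch):

	/* Reject counts whose product would overflow before multiplying. */
	static int checked_array_size(size_t count, size_t elem_size,
				      size_t *total)
	{
		if (count > INT_MAX / elem_size)
			return -EINVAL;	/* multiplication would overflow */
		*total = count * elem_size;
		return 0;
	}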
@@ -3769,18 +3893,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
 	}
 
 	/* Move the objects en-masse into the GTT, evicting if necessary. */
-	ret = i915_gem_execbuffer_pin(dev, file,
-				      object_list, exec_list,
-				      args->buffer_count);
+	ret = i915_gem_execbuffer_reserve(dev, file,
+					  object_list, exec_list,
+					  args->buffer_count);
 	if (ret)
 		goto err;
 
 	/* The objects are in their final locations, apply the relocations. */
-	for (i = 0; i < args->buffer_count; i++) {
-		struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-		obj->base.pending_read_domains = 0;
-		obj->base.pending_write_domain = 0;
-		ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+	ret = i915_gem_execbuffer_relocate(dev, file,
+					   object_list, exec_list,
+					   args->buffer_count);
+	if (ret) {
+		if (ret == -EFAULT) {
+			ret = i915_gem_execbuffer_relocate_slow(dev, file,
+								object_list,
+								exec_list,
+								args->buffer_count);
+			BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+		}
 		if (ret)
 			goto err;
 	}
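The execbuffer rework above is a fast-path/slow-path split: relocations are first applied under struct_mutex with __copy_from_user_inatomic(), which fails with -EFAULT rather than faulting pages in; only then does i915_gem_execbuffer_relocate_slow() drop the lock, copy the relocation lists with faults allowed, relock and retry. The shape of the fallback, reduced to a self-contained illustration (hypothetical helpers, not driver code):

	#include <errno.h>

	/* Stand-in for the atomic path: fails if a fault would be needed. */
	static int relocate_atomic(int resident)
	{
		return resident ? 0 : -EFAULT;
	}

	/* Stand-in for the sleeping path: may fault pages in. */
	static int relocate_faulting(void)
	{
		return 0;
	}

	static int relocate(int resident)
	{
		int ret = relocate_atomic(resident);

		if (ret == -EFAULT)	/* fast path hit a non-resident page */
			ret = relocate_faulting();
		return ret;
	}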
@@ -4244,10 +4374,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 	 * use this buffer rather sooner than later, so issuing the required
 	 * flush earlier is beneficial.
 	 */
-	if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+	if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
 		i915_gem_flush_ring(dev, file_priv,
 				    obj_priv->ring,
 				    0, obj->write_domain);
+	} else if (obj_priv->ring->outstanding_lazy_request) {
+		/* This ring is not being cleared by active usage,
+		 * so emit a request to do so.
+		 */
+		u32 seqno = i915_add_request(dev,
+					     NULL, NULL,
+					     obj_priv->ring);
+		if (seqno == 0)
+			ret = -ENOMEM;
+	}
 
 	/* Update the active list for the hardware's current position.
 	 * Otherwise this only updates on a delayed timer or when irqs
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 25ed911a3112..878fc766a12c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3033,6 +3033,7 @@
 #define  TRANS_DP_10BPC		(1<<9)
 #define  TRANS_DP_6BPC		(2<<9)
 #define  TRANS_DP_12BPC		(3<<9)
+#define  TRANS_DP_BPC_MASK	(3<<9)
 #define  TRANS_DP_VSYNC_ACTIVE_HIGH	(1<<4)
 #define  TRANS_DP_VSYNC_ACTIVE_LOW	0
 #define  TRANS_DP_HSYNC_ACTIVE_HIGH	(1<<3)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 454c064f8ef7..42729d25da58 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
+	/* Cursor state */
+	dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+	dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+	dev_priv->saveCURABASE = I915_READ(CURABASE);
+	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+	if (IS_GEN2(dev))
+		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
 	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
 		dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -529,6 +539,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
 	I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
 
+	/* Cursor state */
+	I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+	I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+	I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+	if (IS_GEN2(dev))
+		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
 	return;
 }
 
@@ -543,16 +563,6 @@ void i915_save_display(struct drm_device *dev)
 	/* Don't save them in KMS mode */
 	i915_save_modeset_reg(dev);
 
-	/* Cursor state */
-	dev_priv->saveCURACNTR = I915_READ(CURACNTR);
-	dev_priv->saveCURAPOS = I915_READ(CURAPOS);
-	dev_priv->saveCURABASE = I915_READ(CURABASE);
-	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
-	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
-	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
-	if (IS_GEN2(dev))
-		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
 	/* CRT state */
 	if (HAS_PCH_SPLIT(dev)) {
 		dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -657,16 +667,6 @@ void i915_restore_display(struct drm_device *dev)
 	/* Don't restore them in KMS mode */
 	i915_restore_modeset_reg(dev);
 
-	/* Cursor state */
-	I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
-	I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
-	I915_WRITE(CURABASE, dev_priv->saveCURABASE);
-	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
-	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
-	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
-	if (IS_GEN2(dev))
-		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
 	/* CRT state */
 	if (HAS_PCH_SPLIT(dev))
 		I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 65c88f9ba12c..2cb8e0b9f1ee 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -190,37 +190,6 @@ out:
 	kfree(output.pointer);
 }
 
-static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
-{
-	return 0;
-}
-
-static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
-				 enum vga_switcheroo_state state)
-{
-	return 0;
-}
-
-static int intel_dsm_init(void)
-{
-	return 0;
-}
-
-static int intel_dsm_get_client_id(struct pci_dev *pdev)
-{
-	if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
-		return VGA_SWITCHEROO_IGD;
-	else
-		return VGA_SWITCHEROO_DIS;
-}
-
-static struct vga_switcheroo_handler intel_dsm_handler = {
-	.switchto = intel_dsm_switchto,
-	.power_state = intel_dsm_power_state,
-	.init = intel_dsm_init,
-	.get_client_id = intel_dsm_get_client_id,
-};
-
 static bool intel_dsm_pci_probe(struct pci_dev *pdev)
 {
 	acpi_handle dhandle, intel_handle;
@@ -276,11 +245,8 @@ void intel_register_dsm_handler(void)
 {
 	if (!intel_dsm_detect())
 		return;
-
-	vga_switcheroo_register_handler(&intel_dsm_handler);
 }
 
 void intel_unregister_dsm_handler(void)
 {
-	vga_switcheroo_unregister_handler();
 }
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index b0b1200ed650..2b2078695d2a 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -270,7 +270,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
 				general->ssc_freq ? 66 : 48;
 		else if (IS_GEN5(dev) || IS_GEN6(dev))
 			dev_priv->lvds_ssc_freq =
-				general->ssc_freq ? 100 : 120;
+				general->ssc_freq ? 120 : 100;
 		else
 			dev_priv->lvds_ssc_freq =
 				general->ssc_freq ? 100 : 96;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c55c77043357..8df574316063 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -34,6 +34,25 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 |		\
+			   ADPA_CRT_HOTPLUG_WARMUP_10MS |		\
+			   ADPA_CRT_HOTPLUG_SAMPLE_4S |			\
+			   ADPA_CRT_HOTPLUG_VOLTAGE_50 |		\
+			   ADPA_CRT_HOTPLUG_VOLREF_325MV |		\
+			   ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+	struct intel_encoder base;
+	bool force_hotplug_required;
+};
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_crt, base);
+}
+
 static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -129,7 +148,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
 	}
 
-	adpa = 0;
+	adpa = ADPA_HOTPLUG_BITS;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 		adpa |= ADPA_HSYNC_ACTIVE_HIGH;
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -157,53 +176,44 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
+	struct intel_crt *crt = intel_attached_crt(connector);
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 adpa, temp;
+	u32 adpa;
 	bool ret;
-	bool turn_off_dac = false;
 
-	temp = adpa = I915_READ(PCH_ADPA);
-
-	if (HAS_PCH_SPLIT(dev))
-		turn_off_dac = true;
-
-	adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-	if (turn_off_dac)
-		adpa &= ~ADPA_DAC_ENABLE;
-
-	/* disable HPD first */
-	I915_WRITE(PCH_ADPA, adpa);
-	(void)I915_READ(PCH_ADPA);
-
-	adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
-		 ADPA_CRT_HOTPLUG_WARMUP_10MS |
-		 ADPA_CRT_HOTPLUG_SAMPLE_4S |
-		 ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
-		 ADPA_CRT_HOTPLUG_VOLREF_325MV |
-		 ADPA_CRT_HOTPLUG_ENABLE |
-		 ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
-
-	DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
-	I915_WRITE(PCH_ADPA, adpa);
-
-	if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
-		     1000))
-		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
-
-	if (turn_off_dac) {
-		/* Make sure hotplug is enabled */
-		I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
-		(void)I915_READ(PCH_ADPA);
+	/* The first time through, trigger an explicit detection cycle */
+	if (crt->force_hotplug_required) {
+		bool turn_off_dac = HAS_PCH_SPLIT(dev);
+		u32 save_adpa;
+
+		crt->force_hotplug_required = 0;
+
+		save_adpa = adpa = I915_READ(PCH_ADPA);
+		DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+		adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+		if (turn_off_dac)
+			adpa &= ~ADPA_DAC_ENABLE;
+
+		I915_WRITE(PCH_ADPA, adpa);
+
+		if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+			     1000))
+			DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+		if (turn_off_dac) {
+			I915_WRITE(PCH_ADPA, save_adpa);
+			POSTING_READ(PCH_ADPA);
+		}
 	}
 
 	/* Check the status to see if both blue and green are on now */
 	adpa = I915_READ(PCH_ADPA);
-	adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
-	if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) ||
-	    (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO))
+	if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
 		ret = true;
 	else
 		ret = false;
+	DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
 
 	return ret;
 }
@@ -277,13 +287,12 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
 	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
 }
 
-static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
+static bool intel_crt_detect_ddc(struct intel_crt *crt)
 {
-	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+	struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
 
 	/* CRT should always be at 0, but check anyway */
-	if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
+	if (crt->base.type != INTEL_OUTPUT_ANALOG)
 		return false;
 
 	if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
@@ -291,7 +300,7 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
 		return true;
 	}
 
-	if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
+	if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
 		DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
 		return true;
 	}
@@ -300,9 +309,9 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
 }
 
 static enum drm_connector_status
-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
 {
-	struct drm_encoder *encoder = &intel_encoder->base;
+	struct drm_encoder *encoder = &crt->base.base;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -434,7 +443,7 @@ static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_device *dev = connector->dev;
-	struct intel_encoder *encoder = intel_attached_encoder(connector);
+	struct intel_crt *crt = intel_attached_crt(connector);
 	struct drm_crtc *crtc;
 	int dpms_mode;
 	enum drm_connector_status status;
@@ -443,28 +452,31 @@ intel_crt_detect(struct drm_connector *connector, bool force)
 		if (intel_crt_detect_hotplug(connector)) {
 			DRM_DEBUG_KMS("CRT detected via hotplug\n");
 			return connector_status_connected;
-		} else
+		} else {
+			DRM_DEBUG_KMS("CRT not detected via hotplug\n");
 			return connector_status_disconnected;
+		}
 	}
 
-	if (intel_crt_detect_ddc(&encoder->base))
+	if (intel_crt_detect_ddc(crt))
 		return connector_status_connected;
 
 	if (!force)
 		return connector->status;
 
 	/* for pre-945g platforms use load detect */
-	if (encoder->base.crtc && encoder->base.crtc->enabled) {
-		status = intel_crt_load_detect(encoder->base.crtc, encoder);
+	crtc = crt->base.base.crtc;
+	if (crtc && crtc->enabled) {
+		status = intel_crt_load_detect(crtc, crt);
 	} else {
-		crtc = intel_get_load_detect_pipe(encoder, connector,
+		crtc = intel_get_load_detect_pipe(&crt->base, connector,
 						  NULL, &dpms_mode);
 		if (crtc) {
-			if (intel_crt_detect_ddc(&encoder->base))
+			if (intel_crt_detect_ddc(crt))
 				status = connector_status_connected;
 			else
-				status = intel_crt_load_detect(crtc, encoder);
-			intel_release_load_detect_pipe(encoder,
+				status = intel_crt_load_detect(crtc, crt);
+			intel_release_load_detect_pipe(&crt->base,
 						       connector, dpms_mode);
 		} else
 			status = connector_status_unknown;
@@ -536,17 +548,17 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = { | |||
536 | void intel_crt_init(struct drm_device *dev) | 548 | void intel_crt_init(struct drm_device *dev) |
537 | { | 549 | { |
538 | struct drm_connector *connector; | 550 | struct drm_connector *connector; |
539 | struct intel_encoder *intel_encoder; | 551 | struct intel_crt *crt; |
540 | struct intel_connector *intel_connector; | 552 | struct intel_connector *intel_connector; |
541 | struct drm_i915_private *dev_priv = dev->dev_private; | 553 | struct drm_i915_private *dev_priv = dev->dev_private; |
542 | 554 | ||
543 | intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL); | 555 | crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL); |
544 | if (!intel_encoder) | 556 | if (!crt) |
545 | return; | 557 | return; |
546 | 558 | ||
547 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 559 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
548 | if (!intel_connector) { | 560 | if (!intel_connector) { |
549 | kfree(intel_encoder); | 561 | kfree(crt); |
550 | return; | 562 | return; |
551 | } | 563 | } |
552 | 564 | ||
@@ -554,20 +566,20 @@ void intel_crt_init(struct drm_device *dev) | |||
554 | drm_connector_init(dev, &intel_connector->base, | 566 | drm_connector_init(dev, &intel_connector->base, |
555 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); | 567 | &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); |
556 | 568 | ||
557 | drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs, | 569 | drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs, |
558 | DRM_MODE_ENCODER_DAC); | 570 | DRM_MODE_ENCODER_DAC); |
559 | 571 | ||
560 | intel_connector_attach_encoder(intel_connector, intel_encoder); | 572 | intel_connector_attach_encoder(intel_connector, &crt->base); |
561 | 573 | ||
562 | intel_encoder->type = INTEL_OUTPUT_ANALOG; | 574 | crt->base.type = INTEL_OUTPUT_ANALOG; |
563 | intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 575 | crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT | |
564 | (1 << INTEL_ANALOG_CLONE_BIT) | | 576 | 1 << INTEL_ANALOG_CLONE_BIT | |
565 | (1 << INTEL_SDVO_LVDS_CLONE_BIT); | 577 | 1 << INTEL_SDVO_LVDS_CLONE_BIT); |
566 | intel_encoder->crtc_mask = (1 << 0) | (1 << 1); | 578 | crt->base.crtc_mask = (1 << 0) | (1 << 1); |
567 | connector->interlace_allowed = 1; | 579 | connector->interlace_allowed = 1; |
568 | connector->doublescan_allowed = 0; | 580 | connector->doublescan_allowed = 0; |
569 | 581 | ||
570 | drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs); | 582 | drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs); |
571 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); | 583 | drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); |
572 | 584 | ||
573 | drm_sysfs_connector_add(connector); | 585 | drm_sysfs_connector_add(connector); |
@@ -577,5 +589,22 @@ void intel_crt_init(struct drm_device *dev) | |||
577 | else | 589 | else |
578 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; | 590 | connector->polled = DRM_CONNECTOR_POLL_CONNECT; |
579 | 591 | ||
592 | /* | ||
593 | * Configure the automatic hotplug detection stuff | ||
594 | */ | ||
595 | crt->force_hotplug_required = 0; | ||
596 | if (HAS_PCH_SPLIT(dev)) { | ||
597 | u32 adpa; | ||
598 | |||
599 | adpa = I915_READ(PCH_ADPA); | ||
600 | adpa &= ~ADPA_CRT_HOTPLUG_MASK; | ||
601 | adpa |= ADPA_HOTPLUG_BITS; | ||
602 | I915_WRITE(PCH_ADPA, adpa); | ||
603 | POSTING_READ(PCH_ADPA); | ||
604 | |||
605 | DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa); | ||
606 | crt->force_hotplug_required = 1; | ||
607 | } | ||
608 | |||
580 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; | 609 | dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; |
581 | } | 610 | } |
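The intel_crt.c changes above program the ADPA hotplug trigger configuration once at init and record, in force_hotplug_required, that PCH platforms must explicitly kick a detection cycle on each probe instead of trusting stale status bits. A hypothetical sketch of such a forced cycle, assuming ADPA_CRT_HOTPLUG_FORCE_TRIGGER is the self-clearing trigger bit (the detect-side code is not part of this diff):

        u32 adpa = I915_READ(PCH_ADPA);
        I915_WRITE(PCH_ADPA, adpa | ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
        /* hardware clears the bit when the detection cycle completes;
         * a real driver would bound this poll with a timeout */
        while (I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER)
                cpu_relax();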
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 48d8fd686ea9..d9b7092439ef 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c | |||
@@ -1611,6 +1611,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, | |||
1611 | 1611 | ||
1612 | wait_event(dev_priv->pending_flip_queue, | 1612 | wait_event(dev_priv->pending_flip_queue, |
1613 | atomic_read(&obj_priv->pending_flip) == 0); | 1613 | atomic_read(&obj_priv->pending_flip) == 0); |
1614 | |||
1615 | /* Big Hammer, we also need to ensure that any pending | ||
1616 | * MI_WAIT_FOR_EVENT inside a user batch buffer on the | ||
1617 | * current scanout is retired before unpinning the old | ||
1618 | * framebuffer. | ||
1619 | */ | ||
1620 | ret = i915_gem_object_flush_gpu(obj_priv, false); | ||
1621 | if (ret) { | ||
1622 | i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj); | ||
1623 | mutex_unlock(&dev->struct_mutex); | ||
1624 | return ret; | ||
1625 | } | ||
1614 | } | 1626 | } |
1615 | 1627 | ||
1616 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, | 1628 | ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, |
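The new "Big Hammer" error path has to unwind state taken earlier in intel_pipe_set_base: the incoming scanout object was pinned under struct_mutex, so a failed flush must unpin it and drop the lock before returning. The second argument of i915_gem_object_flush_gpu() selects interruptibility; false makes the wait uninterruptible, which is what a modeset wants. A condensed sketch of the lock/pin/flush/unwind shape, not the driver's exact code:

        static int set_base_sketch(struct drm_device *dev,
                                   struct drm_gem_object *obj,
                                   struct drm_i915_gem_object *obj_priv)
        {
                int ret;

                mutex_lock(&dev->struct_mutex);
                ret = i915_gem_object_pin(obj, 4096);   /* new scanout */
                if (ret)
                        goto unlock;

                /* retire any MI_WAIT_FOR_EVENT watching the old scanout */
                ret = i915_gem_object_flush_gpu(obj_priv, false);
                if (ret)
                        i915_gem_object_unpin(obj);     /* undo the pin */
        unlock:
                mutex_unlock(&dev->struct_mutex);
                return ret;
        }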
@@ -2108,9 +2120,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc) | |||
2108 | reg = TRANS_DP_CTL(pipe); | 2120 | reg = TRANS_DP_CTL(pipe); |
2109 | temp = I915_READ(reg); | 2121 | temp = I915_READ(reg); |
2110 | temp &= ~(TRANS_DP_PORT_SEL_MASK | | 2122 | temp &= ~(TRANS_DP_PORT_SEL_MASK | |
2111 | TRANS_DP_SYNC_MASK); | 2123 | TRANS_DP_SYNC_MASK | |
2124 | TRANS_DP_BPC_MASK); | ||
2112 | temp |= (TRANS_DP_OUTPUT_ENABLE | | 2125 | temp |= (TRANS_DP_OUTPUT_ENABLE | |
2113 | TRANS_DP_ENH_FRAMING); | 2126 | TRANS_DP_ENH_FRAMING); |
2127 | temp |= TRANS_DP_8BPC; | ||
2114 | 2128 | ||
2115 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) | 2129 | if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) |
2116 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; | 2130 | temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; |
@@ -2700,27 +2714,19 @@ fdi_reduce_ratio(u32 *num, u32 *den) | |||
2700 | } | 2714 | } |
2701 | } | 2715 | } |
2702 | 2716 | ||
2703 | #define DATA_N 0x800000 | ||
2704 | #define LINK_N 0x80000 | ||
2705 | |||
2706 | static void | 2717 | static void |
2707 | ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, | 2718 | ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, |
2708 | int link_clock, struct fdi_m_n *m_n) | 2719 | int link_clock, struct fdi_m_n *m_n) |
2709 | { | 2720 | { |
2710 | u64 temp; | ||
2711 | |||
2712 | m_n->tu = 64; /* default size */ | 2721 | m_n->tu = 64; /* default size */ |
2713 | 2722 | ||
2714 | temp = (u64) DATA_N * pixel_clock; | 2723 | /* BUG_ON(pixel_clock > INT_MAX / 36); */ |
2715 | temp = div_u64(temp, link_clock); | 2724 | m_n->gmch_m = bits_per_pixel * pixel_clock; |
2716 | m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes); | 2725 | m_n->gmch_n = link_clock * nlanes * 8; |
2717 | m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */ | ||
2718 | m_n->gmch_n = DATA_N; | ||
2719 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); | 2726 | fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); |
2720 | 2727 | ||
2721 | temp = (u64) LINK_N * pixel_clock; | 2728 | m_n->link_m = pixel_clock; |
2722 | m_n->link_m = div_u64(temp, link_clock); | 2729 | m_n->link_n = link_clock; |
2723 | m_n->link_n = LINK_N; | ||
2724 | fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); | 2730 | fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); |
2725 | } | 2731 | } |
2726 | 2732 | ||
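The rewritten ironlake_compute_m_n() keeps the ratio exact and leaves register fitting to fdi_reduce_ratio(): the data M/N is simply bits-per-pixel times pixel clock over total link capacity (link clock x lanes x 8 bits). A standalone check of the arithmetic with assumed example values, 1080p60 (148500 kHz pixel clock) at 24 bpp over 4 FDI lanes at 270000 kHz:

        #include <limits.h>
        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                uint32_t bpp = 24, lanes = 4;                /* assumed */
                uint32_t pixel_clock = 148500;               /* kHz */
                uint32_t link_clock = 270000;                /* kHz */

                /* same products as ironlake_compute_m_n() above */
                uint32_t gmch_m = bpp * pixel_clock;         /* 3564000 */
                uint32_t gmch_n = link_clock * lanes * 8;    /* 8640000 */

                printf("data ratio = %u/%u = %.4f\n",
                       gmch_m, gmch_n, (double)gmch_m / gmch_n);
                /* the BUG_ON comment: 36 bpp worst case keeps the
                 * product in 32 bits for any pixel clock below this */
                printf("overflow bound = %d kHz\n", INT_MAX / 36);
                return 0;
        }

Here the link carries pixel data 3564000/8640000 = 41.25% of the time, and link_m/link_n is just pixel_clock/link_clock; assuming the helper only shrinks values that overflow the register fields, both pairs already fit and the reduction is a no-op in this example.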
@@ -3704,6 +3710,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3704 | 3710 | ||
3705 | /* FDI link */ | 3711 | /* FDI link */ |
3706 | if (HAS_PCH_SPLIT(dev)) { | 3712 | if (HAS_PCH_SPLIT(dev)) { |
3713 | int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode); | ||
3707 | int lane = 0, link_bw, bpp; | 3714 | int lane = 0, link_bw, bpp; |
3708 | /* CPU eDP doesn't require FDI link, so just set DP M/N | 3715 | /* CPU eDP doesn't require FDI link, so just set DP M/N |
3709 | according to current link config */ | 3716 | according to current link config */ |
@@ -3787,6 +3794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc, | |||
3787 | 3794 | ||
3788 | intel_crtc->fdi_lanes = lane; | 3795 | intel_crtc->fdi_lanes = lane; |
3789 | 3796 | ||
3797 | if (pixel_multiplier > 1) | ||
3798 | link_bw *= pixel_multiplier; | ||
3790 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); | 3799 | ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); |
3791 | } | 3800 | } |
3792 | 3801 | ||
@@ -5224,6 +5233,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = { | |||
5224 | .page_flip = intel_crtc_page_flip, | 5233 | .page_flip = intel_crtc_page_flip, |
5225 | }; | 5234 | }; |
5226 | 5235 | ||
5236 | static void intel_sanitize_modesetting(struct drm_device *dev, | ||
5237 | int pipe, int plane) | ||
5238 | { | ||
5239 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
5240 | u32 reg, val; | ||
5241 | |||
5242 | if (HAS_PCH_SPLIT(dev)) | ||
5243 | return; | ||
5244 | |||
5245 | /* Who knows what state these registers were left in by the BIOS or | ||
5246 | * grub? | ||
5247 | * | ||
5248 | * If we leave the registers in a conflicting state (e.g. with the | ||
5249 | * display plane reading from the other pipe than the one we intend | ||
5250 | * to use) then when we attempt to teardown the active mode, we will | ||
5251 | * not disable the pipes and planes in the correct order -- leaving | ||
5252 | * a plane reading from a disabled pipe and possibly leading to | ||
5253 | * undefined behaviour. | ||
5254 | */ | ||
5255 | |||
5256 | reg = DSPCNTR(plane); | ||
5257 | val = I915_READ(reg); | ||
5258 | |||
5259 | if ((val & DISPLAY_PLANE_ENABLE) == 0) | ||
5260 | return; | ||
5261 | if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe) | ||
5262 | return; | ||
5263 | |||
5264 | /* This display plane is active and attached to the other CPU pipe. */ | ||
5265 | pipe = !pipe; | ||
5266 | |||
5267 | /* Disable the plane and wait for it to stop reading from the pipe. */ | ||
5268 | I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE); | ||
5269 | intel_flush_display_plane(dev, plane); | ||
5270 | |||
5271 | if (IS_GEN2(dev)) | ||
5272 | intel_wait_for_vblank(dev, pipe); | ||
5273 | |||
5274 | if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE)) | ||
5275 | return; | ||
5276 | |||
5277 | /* Switch off the pipe. */ | ||
5278 | reg = PIPECONF(pipe); | ||
5279 | val = I915_READ(reg); | ||
5280 | if (val & PIPECONF_ENABLE) { | ||
5281 | I915_WRITE(reg, val & ~PIPECONF_ENABLE); | ||
5282 | intel_wait_for_pipe_off(dev, pipe); | ||
5283 | } | ||
5284 | } | ||
5227 | 5285 | ||
5228 | static void intel_crtc_init(struct drm_device *dev, int pipe) | 5286 | static void intel_crtc_init(struct drm_device *dev, int pipe) |
5229 | { | 5287 | { |
@@ -5275,6 +5333,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe) | |||
5275 | 5333 | ||
5276 | setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, | 5334 | setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, |
5277 | (unsigned long)intel_crtc); | 5335 | (unsigned long)intel_crtc); |
5336 | |||
5337 | intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane); | ||
5278 | } | 5338 | } |
5279 | 5339 | ||
5280 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, | 5340 | int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, |
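intel_sanitize_modesetting() encodes the teardown ordering rule spelled out in its comment: a display plane must be disabled (and flushed) before the pipe it scans out from is turned off. The !!(val & DISPPLANE_SEL_PIPE_MASK) == pipe test leans on the pipe-select field being a single bit on the non-PCH parts this runs on, so the double negation collapses it to 0 or 1 for comparison against the pipe index. A tiny illustration of the idiom, with the bit position assumed:

        #include <stdint.h>

        #define SEL_PIPE_BIT (1u << 24)      /* assumed pipe-select bit */

        /* nonzero when the plane control value points at 'pipe' (0 or 1) */
        static int plane_feeds_pipe(uint32_t dspcntr, int pipe)
        {
                return !!(dspcntr & SEL_PIPE_BIT) == pipe;
        }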
@@ -5324,9 +5384,14 @@ static void intel_setup_outputs(struct drm_device *dev) | |||
5324 | struct drm_i915_private *dev_priv = dev->dev_private; | 5384 | struct drm_i915_private *dev_priv = dev->dev_private; |
5325 | struct intel_encoder *encoder; | 5385 | struct intel_encoder *encoder; |
5326 | bool dpd_is_edp = false; | 5386 | bool dpd_is_edp = false; |
5387 | bool has_lvds = false; | ||
5327 | 5388 | ||
5328 | if (IS_MOBILE(dev) && !IS_I830(dev)) | 5389 | if (IS_MOBILE(dev) && !IS_I830(dev)) |
5329 | intel_lvds_init(dev); | 5390 | has_lvds = intel_lvds_init(dev); |
5391 | if (!has_lvds && !HAS_PCH_SPLIT(dev)) { | ||
5392 | /* disable the panel fitter on everything but LVDS */ | ||
5393 | I915_WRITE(PFIT_CONTROL, 0); | ||
5394 | } | ||
5330 | 5395 | ||
5331 | if (HAS_PCH_SPLIT(dev)) { | 5396 | if (HAS_PCH_SPLIT(dev)) { |
5332 | dpd_is_edp = intel_dpd_is_edp(dev); | 5397 | dpd_is_edp = intel_dpd_is_edp(dev); |
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index c8e005553310..864417cffe9a 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c | |||
@@ -479,6 +479,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
479 | uint16_t address = algo_data->address; | 479 | uint16_t address = algo_data->address; |
480 | uint8_t msg[5]; | 480 | uint8_t msg[5]; |
481 | uint8_t reply[2]; | 481 | uint8_t reply[2]; |
482 | unsigned retry; | ||
482 | int msg_bytes; | 483 | int msg_bytes; |
483 | int reply_bytes; | 484 | int reply_bytes; |
484 | int ret; | 485 | int ret; |
@@ -513,14 +514,33 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
513 | break; | 514 | break; |
514 | } | 515 | } |
515 | 516 | ||
516 | for (;;) { | 517 | for (retry = 0; retry < 5; retry++) { |
517 | ret = intel_dp_aux_ch(intel_dp, | 518 | ret = intel_dp_aux_ch(intel_dp, |
518 | msg, msg_bytes, | 519 | msg, msg_bytes, |
519 | reply, reply_bytes); | 520 | reply, reply_bytes); |
520 | if (ret < 0) { | 521 | if (ret < 0) { |
521 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); | 522 | DRM_DEBUG_KMS("aux_ch failed %d\n", ret); |
522 | return ret; | 523 | return ret; |
523 | } | 524 | } |
525 | |||
526 | switch (reply[0] & AUX_NATIVE_REPLY_MASK) { | ||
527 | case AUX_NATIVE_REPLY_ACK: | ||
528 | /* I2C-over-AUX Reply field is only valid | ||
529 | * when paired with AUX ACK. | ||
530 | */ | ||
531 | break; | ||
532 | case AUX_NATIVE_REPLY_NACK: | ||
533 | DRM_DEBUG_KMS("aux_ch native nack\n"); | ||
534 | return -EREMOTEIO; | ||
535 | case AUX_NATIVE_REPLY_DEFER: | ||
536 | udelay(100); | ||
537 | continue; | ||
538 | default: | ||
539 | DRM_ERROR("aux_ch invalid native reply 0x%02x\n", | ||
540 | reply[0]); | ||
541 | return -EREMOTEIO; | ||
542 | } | ||
543 | |||
524 | switch (reply[0] & AUX_I2C_REPLY_MASK) { | 544 | switch (reply[0] & AUX_I2C_REPLY_MASK) { |
525 | case AUX_I2C_REPLY_ACK: | 545 | case AUX_I2C_REPLY_ACK: |
526 | if (mode == MODE_I2C_READ) { | 546 | if (mode == MODE_I2C_READ) { |
@@ -528,17 +548,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, | |||
528 | } | 548 | } |
529 | return reply_bytes - 1; | 549 | return reply_bytes - 1; |
530 | case AUX_I2C_REPLY_NACK: | 550 | case AUX_I2C_REPLY_NACK: |
531 | DRM_DEBUG_KMS("aux_ch nack\n"); | 551 | DRM_DEBUG_KMS("aux_i2c nack\n"); |
532 | return -EREMOTEIO; | 552 | return -EREMOTEIO; |
533 | case AUX_I2C_REPLY_DEFER: | 553 | case AUX_I2C_REPLY_DEFER: |
534 | DRM_DEBUG_KMS("aux_ch defer\n"); | 554 | DRM_DEBUG_KMS("aux_i2c defer\n"); |
535 | udelay(100); | 555 | udelay(100); |
536 | break; | 556 | break; |
537 | default: | 557 | default: |
538 | DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]); | 558 | DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); |
539 | return -EREMOTEIO; | 559 | return -EREMOTEIO; |
540 | } | 560 | } |
541 | } | 561 | } |
562 | |||
563 | DRM_ERROR("too many retries, giving up\n"); | ||
564 | return -EREMOTEIO; | ||
542 | } | 565 | } |
543 | 566 | ||
544 | static int | 567 | static int |
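After this change each I2C-over-AUX attempt is judged twice: first by the native AUX reply (did the sink's AUX logic accept the transaction at all?) and only then, when that was an ACK, by the I2C reply (did the addressed I2C device respond?). Both DEFER cases back off 100us and retry, and the loop is now bounded at five attempts instead of spinning forever on a wedged sink. A condensed restatement of the decision table, with the reply masks assumed from the DisplayPort spec (native reply in bits 5:4, I2C reply in bits 7:6 of reply[0]):

        #include <errno.h>
        #include <stdint.h>

        #define AUX_NATIVE_REPLY_MASK  (0x3 << 4)    /* assumed values */
        #define AUX_NATIVE_REPLY_ACK   (0x0 << 4)
        #define AUX_NATIVE_REPLY_DEFER (0x2 << 4)
        #define AUX_I2C_REPLY_MASK     (0x3 << 6)
        #define AUX_I2C_REPLY_ACK      (0x0 << 6)
        #define AUX_I2C_REPLY_DEFER    (0x2 << 6)

        /* 1 = done, 0 = retry after a delay, negative = hard failure */
        static int classify_aux_i2c_reply(uint8_t reply)
        {
                switch (reply & AUX_NATIVE_REPLY_MASK) {
                case AUX_NATIVE_REPLY_ACK:
                        break;              /* I2C field below is valid */
                case AUX_NATIVE_REPLY_DEFER:
                        return 0;           /* sink's AUX channel busy */
                default:
                        return -EREMOTEIO;  /* native NACK or garbage */
                }

                switch (reply & AUX_I2C_REPLY_MASK) {
                case AUX_I2C_REPLY_ACK:
                        return 1;           /* transaction complete */
                case AUX_I2C_REPLY_DEFER:
                        return 0;           /* I2C device busy */
                default:
                        return -EREMOTEIO;  /* I2C NACK */
                }
        }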
@@ -584,17 +607,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
584 | mode->clock = dev_priv->panel_fixed_mode->clock; | 607 | mode->clock = dev_priv->panel_fixed_mode->clock; |
585 | } | 608 | } |
586 | 609 | ||
587 | /* Just use VBT values for eDP */ | ||
588 | if (is_edp(intel_dp)) { | ||
589 | intel_dp->lane_count = dev_priv->edp.lanes; | ||
590 | intel_dp->link_bw = dev_priv->edp.rate; | ||
591 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); | ||
592 | DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n", | ||
593 | intel_dp->link_bw, intel_dp->lane_count, | ||
594 | adjusted_mode->clock); | ||
595 | return true; | ||
596 | } | ||
597 | |||
598 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { | 610 | for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { |
599 | for (clock = 0; clock <= max_clock; clock++) { | 611 | for (clock = 0; clock <= max_clock; clock++) { |
600 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); | 612 | int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); |
@@ -613,6 +625,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, | |||
613 | } | 625 | } |
614 | } | 626 | } |
615 | 627 | ||
628 | if (is_edp(intel_dp)) { | ||
629 | /* okay we failed just pick the highest */ | ||
630 | intel_dp->lane_count = max_lane_count; | ||
631 | intel_dp->link_bw = bws[max_clock]; | ||
632 | adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); | ||
633 | DRM_DEBUG_KMS("Force picking display port link bw %02x lane " | ||
634 | "count %d clock %d\n", | ||
635 | intel_dp->link_bw, intel_dp->lane_count, | ||
636 | adjusted_mode->clock); | ||
637 | |||
638 | return true; | ||
639 | } | ||
640 | |||
616 | return false; | 641 | return false; |
617 | } | 642 | } |
618 | 643 | ||
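The two eDP hunks above trade one policy for another: instead of short-circuiting to the VBT-provided lane count and rate before the search, eDP now runs the normal lane/clock search and only falls back to the maximum configuration when nothing fits. The design point is that an eDP panel is fixed hardware, so refusing its mode would black-screen the machine; forcing the widest link at least attempts it. For scale (assuming the usual 8b/10b coding, which leaves 80% of the raw rate for payload): 4 lanes at 2.7 GHz carry 10.8 Gb/s raw and 8.64 Gb/s usable, while a 24 bpp mode with a 193.25 MHz pixel clock needs roughly 4.64 Gb/s.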
@@ -1087,21 +1112,11 @@ intel_get_adjust_train(struct intel_dp *intel_dp) | |||
1087 | } | 1112 | } |
1088 | 1113 | ||
1089 | static uint32_t | 1114 | static uint32_t |
1090 | intel_dp_signal_levels(struct intel_dp *intel_dp) | 1115 | intel_dp_signal_levels(uint8_t train_set, int lane_count) |
1091 | { | 1116 | { |
1092 | struct drm_device *dev = intel_dp->base.base.dev; | 1117 | uint32_t signal_levels = 0; |
1093 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1094 | uint32_t signal_levels = 0; | ||
1095 | u8 train_set = intel_dp->train_set[0]; | ||
1096 | u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK; | ||
1097 | u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK; | ||
1098 | 1118 | ||
1099 | if (is_edp(intel_dp)) { | 1119 | switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) { |
1100 | vswing = dev_priv->edp.vswing; | ||
1101 | preemphasis = dev_priv->edp.preemphasis; | ||
1102 | } | ||
1103 | |||
1104 | switch (vswing) { | ||
1105 | case DP_TRAIN_VOLTAGE_SWING_400: | 1120 | case DP_TRAIN_VOLTAGE_SWING_400: |
1106 | default: | 1121 | default: |
1107 | signal_levels |= DP_VOLTAGE_0_4; | 1122 | signal_levels |= DP_VOLTAGE_0_4; |
@@ -1116,7 +1131,7 @@ intel_dp_signal_levels(struct intel_dp *intel_dp) | |||
1116 | signal_levels |= DP_VOLTAGE_1_2; | 1131 | signal_levels |= DP_VOLTAGE_1_2; |
1117 | break; | 1132 | break; |
1118 | } | 1133 | } |
1119 | switch (preemphasis) { | 1134 | switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) { |
1120 | case DP_TRAIN_PRE_EMPHASIS_0: | 1135 | case DP_TRAIN_PRE_EMPHASIS_0: |
1121 | default: | 1136 | default: |
1122 | signal_levels |= DP_PRE_EMPHASIS_0; | 1137 | signal_levels |= DP_PRE_EMPHASIS_0; |
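The refactored intel_dp_signal_levels() is now a pure function of the sink's request byte: each lane's train_set entry packs the requested voltage swing and pre-emphasis, and the helper translates them into the source register encoding. A minimal decode of the two fields, assuming the standard DPCD TRAINING_LANEx_SET layout (swing in bits 1:0, pre-emphasis in bits 4:3):

        #include <stdint.h>

        static void decode_train_set(uint8_t train_set,
                                     unsigned *vswing, unsigned *preemph)
        {
                *vswing  = train_set & 0x3;         /* DP_TRAIN_VOLTAGE_SWING_MASK */
                *preemph = (train_set >> 3) & 0x3;  /* pre-emphasis level */
        }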
@@ -1203,18 +1218,6 @@ intel_channel_eq_ok(struct intel_dp *intel_dp) | |||
1203 | } | 1218 | } |
1204 | 1219 | ||
1205 | static bool | 1220 | static bool |
1206 | intel_dp_aux_handshake_required(struct intel_dp *intel_dp) | ||
1207 | { | ||
1208 | struct drm_device *dev = intel_dp->base.base.dev; | ||
1209 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
1210 | |||
1211 | if (is_edp(intel_dp) && dev_priv->no_aux_handshake) | ||
1212 | return false; | ||
1213 | |||
1214 | return true; | ||
1215 | } | ||
1216 | |||
1217 | static bool | ||
1218 | intel_dp_set_link_train(struct intel_dp *intel_dp, | 1221 | intel_dp_set_link_train(struct intel_dp *intel_dp, |
1219 | uint32_t dp_reg_value, | 1222 | uint32_t dp_reg_value, |
1220 | uint8_t dp_train_pat) | 1223 | uint8_t dp_train_pat) |
@@ -1226,9 +1229,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp, | |||
1226 | I915_WRITE(intel_dp->output_reg, dp_reg_value); | 1229 | I915_WRITE(intel_dp->output_reg, dp_reg_value); |
1227 | POSTING_READ(intel_dp->output_reg); | 1230 | POSTING_READ(intel_dp->output_reg); |
1228 | 1231 | ||
1229 | if (!intel_dp_aux_handshake_required(intel_dp)) | ||
1230 | return true; | ||
1231 | |||
1232 | intel_dp_aux_native_write_1(intel_dp, | 1232 | intel_dp_aux_native_write_1(intel_dp, |
1233 | DP_TRAINING_PATTERN_SET, | 1233 | DP_TRAINING_PATTERN_SET, |
1234 | dp_train_pat); | 1234 | dp_train_pat); |
@@ -1261,11 +1261,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1261 | POSTING_READ(intel_dp->output_reg); | 1261 | POSTING_READ(intel_dp->output_reg); |
1262 | intel_wait_for_vblank(dev, intel_crtc->pipe); | 1262 | intel_wait_for_vblank(dev, intel_crtc->pipe); |
1263 | 1263 | ||
1264 | if (intel_dp_aux_handshake_required(intel_dp)) | 1264 | /* Write the link configuration data */ |
1265 | /* Write the link configuration data */ | 1265 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, |
1266 | intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, | 1266 | intel_dp->link_configuration, |
1267 | intel_dp->link_configuration, | 1267 | DP_LINK_CONFIGURATION_SIZE); |
1268 | DP_LINK_CONFIGURATION_SIZE); | ||
1269 | 1268 | ||
1270 | DP |= DP_PORT_EN; | 1269 | DP |= DP_PORT_EN; |
1271 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1270 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
@@ -1283,7 +1282,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1283 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | 1282 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1284 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1283 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1285 | } else { | 1284 | } else { |
1286 | signal_levels = intel_dp_signal_levels(intel_dp); | 1285 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); |
1287 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1286 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1288 | } | 1287 | } |
1289 | 1288 | ||
@@ -1297,37 +1296,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp) | |||
1297 | break; | 1296 | break; |
1298 | /* Set training pattern 1 */ | 1297 | /* Set training pattern 1 */ |
1299 | 1298 | ||
1300 | udelay(500); | 1299 | udelay(100); |
1301 | if (intel_dp_aux_handshake_required(intel_dp)) { | 1300 | if (!intel_dp_get_link_status(intel_dp)) |
1302 | break; | 1301 | break; |
1303 | } else { | ||
1304 | if (!intel_dp_get_link_status(intel_dp)) | ||
1305 | break; | ||
1306 | 1302 | ||
1307 | if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { | 1303 | if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { |
1308 | clock_recovery = true; | 1304 | clock_recovery = true; |
1309 | break; | 1305 | break; |
1310 | } | 1306 | } |
1311 | 1307 | ||
1312 | /* Check to see if we've tried the max voltage */ | 1308 | /* Check to see if we've tried the max voltage */ |
1313 | for (i = 0; i < intel_dp->lane_count; i++) | 1309 | for (i = 0; i < intel_dp->lane_count; i++) |
1314 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) | 1310 | if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) |
1315 | break; | ||
1316 | if (i == intel_dp->lane_count) | ||
1317 | break; | 1311 | break; |
1312 | if (i == intel_dp->lane_count) | ||
1313 | break; | ||
1318 | 1314 | ||
1319 | /* Check to see if we've tried the same voltage 5 times */ | 1315 | /* Check to see if we've tried the same voltage 5 times */ |
1320 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { | 1316 | if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { |
1321 | ++tries; | 1317 | ++tries; |
1322 | if (tries == 5) | 1318 | if (tries == 5) |
1323 | break; | 1319 | break; |
1324 | } else | 1320 | } else |
1325 | tries = 0; | 1321 | tries = 0; |
1326 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; | 1322 | voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; |
1327 | 1323 | ||
1328 | /* Compute new intel_dp->train_set as requested by target */ | 1324 | /* Compute new intel_dp->train_set as requested by target */ |
1329 | intel_get_adjust_train(intel_dp); | 1325 | intel_get_adjust_train(intel_dp); |
1330 | } | ||
1331 | } | 1326 | } |
1332 | 1327 | ||
1333 | intel_dp->DP = DP; | 1328 | intel_dp->DP = DP; |
@@ -1354,7 +1349,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1354 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); | 1349 | signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); |
1355 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; | 1350 | DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; |
1356 | } else { | 1351 | } else { |
1357 | signal_levels = intel_dp_signal_levels(intel_dp); | 1352 | signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); |
1358 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; | 1353 | DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; |
1359 | } | 1354 | } |
1360 | 1355 | ||
@@ -1368,28 +1363,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp) | |||
1368 | DP_TRAINING_PATTERN_2)) | 1363 | DP_TRAINING_PATTERN_2)) |
1369 | break; | 1364 | break; |
1370 | 1365 | ||
1371 | udelay(500); | 1366 | udelay(400); |
1372 | 1367 | if (!intel_dp_get_link_status(intel_dp)) | |
1373 | if (!intel_dp_aux_handshake_required(intel_dp)) { | ||
1374 | break; | 1368 | break; |
1375 | } else { | ||
1376 | if (!intel_dp_get_link_status(intel_dp)) | ||
1377 | break; | ||
1378 | 1369 | ||
1379 | if (intel_channel_eq_ok(intel_dp)) { | 1370 | if (intel_channel_eq_ok(intel_dp)) { |
1380 | channel_eq = true; | 1371 | channel_eq = true; |
1381 | break; | 1372 | break; |
1382 | } | 1373 | } |
1383 | 1374 | ||
1384 | /* Try 5 times */ | 1375 | /* Try 5 times */ |
1385 | if (tries > 5) | 1376 | if (tries > 5) |
1386 | break; | 1377 | break; |
1387 | 1378 | ||
1388 | /* Compute new intel_dp->train_set as requested by target */ | 1379 | /* Compute new intel_dp->train_set as requested by target */ |
1389 | intel_get_adjust_train(intel_dp); | 1380 | intel_get_adjust_train(intel_dp); |
1390 | ++tries; | 1381 | ++tries; |
1391 | } | ||
1392 | } | 1382 | } |
1383 | |||
1393 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) | 1384 | if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) |
1394 | reg = DP | DP_LINK_TRAIN_OFF_CPT; | 1385 | reg = DP | DP_LINK_TRAIN_OFF_CPT; |
1395 | else | 1386 | else |
@@ -1408,6 +1399,9 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1408 | struct drm_i915_private *dev_priv = dev->dev_private; | 1399 | struct drm_i915_private *dev_priv = dev->dev_private; |
1409 | uint32_t DP = intel_dp->DP; | 1400 | uint32_t DP = intel_dp->DP; |
1410 | 1401 | ||
1402 | if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0) | ||
1403 | return; | ||
1404 | |||
1411 | DRM_DEBUG_KMS("\n"); | 1405 | DRM_DEBUG_KMS("\n"); |
1412 | 1406 | ||
1413 | if (is_edp(intel_dp)) { | 1407 | if (is_edp(intel_dp)) { |
@@ -1430,6 +1424,28 @@ intel_dp_link_down(struct intel_dp *intel_dp) | |||
1430 | 1424 | ||
1431 | if (is_edp(intel_dp)) | 1425 | if (is_edp(intel_dp)) |
1432 | DP |= DP_LINK_TRAIN_OFF; | 1426 | DP |= DP_LINK_TRAIN_OFF; |
1427 | |||
1428 | if (!HAS_PCH_CPT(dev) && | ||
1429 | I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { | ||
1430 | struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc); | ||
1431 | /* Hardware workaround: leaving our transcoder select | ||
1432 | * set to transcoder B while it's off will prevent the | ||
1433 | * corresponding HDMI output on transcoder A. | ||
1434 | * | ||
1435 | * Combine this with another hardware workaround: | ||
1436 | * transcoder select bit can only be cleared while the | ||
1437 | * port is enabled. | ||
1438 | */ | ||
1439 | DP &= ~DP_PIPEB_SELECT; | ||
1440 | I915_WRITE(intel_dp->output_reg, DP); | ||
1441 | |||
1442 | /* Changes to enable or select take place the vblank | ||
1443 | * after being written. | ||
1444 | */ | ||
1445 | intel_wait_for_vblank(intel_dp->base.base.dev, | ||
1446 | intel_crtc->pipe); | ||
1447 | } | ||
1448 | |||
1433 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); | 1449 | I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); |
1434 | POSTING_READ(intel_dp->output_reg); | 1450 | POSTING_READ(intel_dp->output_reg); |
1435 | } | 1451 | } |
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 21551fe74541..e52c6125bb1f 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h | |||
@@ -237,7 +237,7 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device); | |||
237 | extern void intel_dvo_init(struct drm_device *dev); | 237 | extern void intel_dvo_init(struct drm_device *dev); |
238 | extern void intel_tv_init(struct drm_device *dev); | 238 | extern void intel_tv_init(struct drm_device *dev); |
239 | extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); | 239 | extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); |
240 | extern void intel_lvds_init(struct drm_device *dev); | 240 | extern bool intel_lvds_init(struct drm_device *dev); |
241 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); | 241 | extern void intel_dp_init(struct drm_device *dev, int dp_reg); |
242 | void | 242 | void |
243 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, | 243 | intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, |
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c index 2be4f728ed0c..3dba086e7eea 100644 --- a/drivers/gpu/drm/i915/intel_i2c.c +++ b/drivers/gpu/drm/i915/intel_i2c.c | |||
@@ -160,7 +160,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) | |||
160 | }; | 160 | }; |
161 | struct intel_gpio *gpio; | 161 | struct intel_gpio *gpio; |
162 | 162 | ||
163 | if (pin < 1 || pin > 7) | 163 | if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin]) |
164 | return NULL; | 164 | return NULL; |
165 | 165 | ||
166 | gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL); | 166 | gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL); |
@@ -172,7 +172,8 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin) | |||
172 | gpio->reg += PCH_GPIOA - GPIOA; | 172 | gpio->reg += PCH_GPIOA - GPIOA; |
173 | gpio->dev_priv = dev_priv; | 173 | gpio->dev_priv = dev_priv; |
174 | 174 | ||
175 | snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]); | 175 | snprintf(gpio->adapter.name, sizeof(gpio->adapter.name), |
176 | "i915 GPIO%c", "?BACDE?F"[pin]); | ||
176 | gpio->adapter.owner = THIS_MODULE; | 177 | gpio->adapter.owner = THIS_MODULE; |
177 | gpio->adapter.algo_data = &gpio->algo; | 178 | gpio->adapter.algo_data = &gpio->algo; |
178 | gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; | 179 | gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; |
@@ -349,7 +350,7 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
349 | "panel", | 350 | "panel", |
350 | "dpc", | 351 | "dpc", |
351 | "dpb", | 352 | "dpb", |
352 | "reserved" | 353 | "reserved", |
353 | "dpd", | 354 | "dpd", |
354 | }; | 355 | }; |
355 | struct drm_i915_private *dev_priv = dev->dev_private; | 356 | struct drm_i915_private *dev_priv = dev->dev_private; |
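The one-character gmbus fix above deserves a note: adjacent string literals concatenate in C, so the missing comma silently fused "reserved" and "dpd" into one entry, leaving the names array an element short and the DPD bus mislabeled. A standalone demonstration of the failure mode:

        #include <stdio.h>

        int main(void)
        {
                /* the buggy initializer: no comma after "reserved" */
                const char *buggy[] = { "dpb", "reserved" "dpd" };
                /* the fixed initializer */
                const char *fixed[] = { "dpb", "reserved", "dpd" };

                printf("buggy: %zu entries, buggy[1] = \"%s\"\n",
                       sizeof(buggy) / sizeof(buggy[0]), buggy[1]);
                printf("fixed: %zu entries\n",
                       sizeof(fixed) / sizeof(fixed[0]));
                return 0;   /* prints 2 entries / "reserveddpd", then 3 */
        }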
@@ -366,8 +367,8 @@ int intel_setup_gmbus(struct drm_device *dev) | |||
366 | bus->adapter.owner = THIS_MODULE; | 367 | bus->adapter.owner = THIS_MODULE; |
367 | bus->adapter.class = I2C_CLASS_DDC; | 368 | bus->adapter.class = I2C_CLASS_DDC; |
368 | snprintf(bus->adapter.name, | 369 | snprintf(bus->adapter.name, |
369 | I2C_NAME_SIZE, | 370 | sizeof(bus->adapter.name), |
370 | "gmbus %s", | 371 | "i915 gmbus %s", |
371 | names[i]); | 372 | names[i]); |
372 | 373 | ||
373 | bus->adapter.dev.parent = &dev->pdev->dev; | 374 | bus->adapter.dev.parent = &dev->pdev->dev; |
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index 4324a326f98e..25bcedf386fd 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c | |||
@@ -68,7 +68,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector) | |||
68 | /** | 68 | /** |
69 | * Sets the power state for the panel. | 69 | * Sets the power state for the panel. |
70 | */ | 70 | */ |
71 | static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) | 71 | static void intel_lvds_enable(struct intel_lvds *intel_lvds) |
72 | { | 72 | { |
73 | struct drm_device *dev = intel_lvds->base.base.dev; | 73 | struct drm_device *dev = intel_lvds->base.base.dev; |
74 | struct drm_i915_private *dev_priv = dev->dev_private; | 74 | struct drm_i915_private *dev_priv = dev->dev_private; |
@@ -82,26 +82,61 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) | |||
82 | lvds_reg = LVDS; | 82 | lvds_reg = LVDS; |
83 | } | 83 | } |
84 | 84 | ||
85 | if (on) { | 85 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); |
86 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN); | ||
87 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); | ||
88 | intel_panel_set_backlight(dev, dev_priv->backlight_level); | ||
89 | } else { | ||
90 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | ||
91 | |||
92 | intel_panel_set_backlight(dev, 0); | ||
93 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); | ||
94 | 86 | ||
95 | if (intel_lvds->pfit_control) { | 87 | if (intel_lvds->pfit_dirty) { |
96 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) | 88 | /* |
97 | DRM_ERROR("timed out waiting for panel to power off\n"); | 89 | * Enable automatic panel scaling so that non-native modes |
98 | I915_WRITE(PFIT_CONTROL, 0); | 90 | * fill the screen. The panel fitter should only be |
99 | intel_lvds->pfit_control = 0; | 91 | * adjusted whilst the pipe is disabled, according to |
92 | * register description and PRM. | ||
93 | */ | ||
94 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", | ||
95 | intel_lvds->pfit_control, | ||
96 | intel_lvds->pfit_pgm_ratios); | ||
97 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) { | ||
98 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
99 | } else { | ||
100 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); | ||
101 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); | ||
100 | intel_lvds->pfit_dirty = false; | 102 | intel_lvds->pfit_dirty = false; |
101 | } | 103 | } |
104 | } | ||
105 | |||
106 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON); | ||
107 | POSTING_READ(lvds_reg); | ||
108 | |||
109 | intel_panel_set_backlight(dev, dev_priv->backlight_level); | ||
110 | } | ||
111 | |||
112 | static void intel_lvds_disable(struct intel_lvds *intel_lvds) | ||
113 | { | ||
114 | struct drm_device *dev = intel_lvds->base.base.dev; | ||
115 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
116 | u32 ctl_reg, lvds_reg; | ||
117 | |||
118 | if (HAS_PCH_SPLIT(dev)) { | ||
119 | ctl_reg = PCH_PP_CONTROL; | ||
120 | lvds_reg = PCH_LVDS; | ||
121 | } else { | ||
122 | ctl_reg = PP_CONTROL; | ||
123 | lvds_reg = LVDS; | ||
124 | } | ||
125 | |||
126 | dev_priv->backlight_level = intel_panel_get_backlight(dev); | ||
127 | intel_panel_set_backlight(dev, 0); | ||
128 | |||
129 | I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON); | ||
130 | |||
131 | if (intel_lvds->pfit_control) { | ||
132 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) | ||
133 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
102 | 134 | ||
103 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); | 135 | I915_WRITE(PFIT_CONTROL, 0); |
136 | intel_lvds->pfit_dirty = true; | ||
104 | } | 137 | } |
138 | |||
139 | I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); | ||
105 | POSTING_READ(lvds_reg); | 140 | POSTING_READ(lvds_reg); |
106 | } | 141 | } |
107 | 142 | ||
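Splitting set_power into enable/disable also moves the panel fitter behind a dirty flag: disable zeroes PFIT_CONTROL and marks it dirty, and the next enable reprograms PFIT_PGM_RATIOS and PFIT_CONTROL only after the PP_STATUS poll confirms the panel actually powered down, since the fitter may only be touched while the pipe is off. The generic shape of that deferred-write pattern, with illustrative names rather than the driver's:

        #include <stdbool.h>
        #include <stdint.h>

        struct deferred_reg {
                uint32_t value;
                bool dirty;     /* cached value not yet in hardware */
        };

        static void apply_if_safe(struct deferred_reg *r,
                                  bool (*hw_is_safe)(void),
                                  void (*hw_write)(uint32_t))
        {
                if (r->dirty && hw_is_safe()) {
                        hw_write(r->value);
                        r->dirty = false;   /* hardware matches cache */
                }
        }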
@@ -110,9 +145,9 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode) | |||
110 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | 145 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); |
111 | 146 | ||
112 | if (mode == DRM_MODE_DPMS_ON) | 147 | if (mode == DRM_MODE_DPMS_ON) |
113 | intel_lvds_set_power(intel_lvds, true); | 148 | intel_lvds_enable(intel_lvds); |
114 | else | 149 | else |
115 | intel_lvds_set_power(intel_lvds, false); | 150 | intel_lvds_disable(intel_lvds); |
116 | 151 | ||
117 | /* XXX: We never power down the LVDS pairs. */ | 152 | /* XXX: We never power down the LVDS pairs. */ |
118 | } | 153 | } |
@@ -411,43 +446,18 @@ static void intel_lvds_commit(struct drm_encoder *encoder) | |||
411 | /* Always do a full power on as we do not know what state | 446 | /* Always do a full power on as we do not know what state |
412 | * we were left in. | 447 | * we were left in. |
413 | */ | 448 | */ |
414 | intel_lvds_set_power(intel_lvds, true); | 449 | intel_lvds_enable(intel_lvds); |
415 | } | 450 | } |
416 | 451 | ||
417 | static void intel_lvds_mode_set(struct drm_encoder *encoder, | 452 | static void intel_lvds_mode_set(struct drm_encoder *encoder, |
418 | struct drm_display_mode *mode, | 453 | struct drm_display_mode *mode, |
419 | struct drm_display_mode *adjusted_mode) | 454 | struct drm_display_mode *adjusted_mode) |
420 | { | 455 | { |
421 | struct drm_device *dev = encoder->dev; | ||
422 | struct drm_i915_private *dev_priv = dev->dev_private; | ||
423 | struct intel_lvds *intel_lvds = to_intel_lvds(encoder); | ||
424 | |||
425 | /* | 456 | /* |
426 | * The LVDS pin pair will already have been turned on in the | 457 | * The LVDS pin pair will already have been turned on in the |
427 | * intel_crtc_mode_set since it has a large impact on the DPLL | 458 | * intel_crtc_mode_set since it has a large impact on the DPLL |
428 | * settings. | 459 | * settings. |
429 | */ | 460 | */ |
430 | |||
431 | if (HAS_PCH_SPLIT(dev)) | ||
432 | return; | ||
433 | |||
434 | if (!intel_lvds->pfit_dirty) | ||
435 | return; | ||
436 | |||
437 | /* | ||
438 | * Enable automatic panel scaling so that non-native modes fill the | ||
439 | * screen. Should be enabled before the pipe is enabled, according to | ||
440 | * register description and PRM. | ||
441 | */ | ||
442 | DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n", | ||
443 | intel_lvds->pfit_control, | ||
444 | intel_lvds->pfit_pgm_ratios); | ||
445 | if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) | ||
446 | DRM_ERROR("timed out waiting for panel to power off\n"); | ||
447 | |||
448 | I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios); | ||
449 | I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control); | ||
450 | intel_lvds->pfit_dirty = false; | ||
451 | } | 461 | } |
452 | 462 | ||
453 | /** | 463 | /** |
@@ -837,7 +847,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin) | |||
837 | * Create the connector, register the LVDS DDC bus, and try to figure out what | 847 | * Create the connector, register the LVDS DDC bus, and try to figure out what |
838 | * modes we can display on the LVDS panel (if present). | 848 | * modes we can display on the LVDS panel (if present). |
839 | */ | 849 | */ |
840 | void intel_lvds_init(struct drm_device *dev) | 850 | bool intel_lvds_init(struct drm_device *dev) |
841 | { | 851 | { |
842 | struct drm_i915_private *dev_priv = dev->dev_private; | 852 | struct drm_i915_private *dev_priv = dev->dev_private; |
843 | struct intel_lvds *intel_lvds; | 853 | struct intel_lvds *intel_lvds; |
@@ -853,37 +863,37 @@ void intel_lvds_init(struct drm_device *dev) | |||
853 | 863 | ||
854 | /* Skip init on machines we know falsely report LVDS */ | 864 | /* Skip init on machines we know falsely report LVDS */ |
855 | if (dmi_check_system(intel_no_lvds)) | 865 | if (dmi_check_system(intel_no_lvds)) |
856 | return; | 866 | return false; |
857 | 867 | ||
858 | pin = GMBUS_PORT_PANEL; | 868 | pin = GMBUS_PORT_PANEL; |
859 | if (!lvds_is_present_in_vbt(dev, &pin)) { | 869 | if (!lvds_is_present_in_vbt(dev, &pin)) { |
860 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); | 870 | DRM_DEBUG_KMS("LVDS is not present in VBT\n"); |
861 | return; | 871 | return false; |
862 | } | 872 | } |
863 | 873 | ||
864 | if (HAS_PCH_SPLIT(dev)) { | 874 | if (HAS_PCH_SPLIT(dev)) { |
865 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) | 875 | if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) |
866 | return; | 876 | return false; |
867 | if (dev_priv->edp.support) { | 877 | if (dev_priv->edp.support) { |
868 | DRM_DEBUG_KMS("disable LVDS for eDP support\n"); | 878 | DRM_DEBUG_KMS("disable LVDS for eDP support\n"); |
869 | return; | 879 | return false; |
870 | } | 880 | } |
871 | } | 881 | } |
872 | 882 | ||
873 | if (!intel_lvds_ddc_probe(dev, pin)) { | 883 | if (!intel_lvds_ddc_probe(dev, pin)) { |
874 | DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n"); | 884 | DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n"); |
875 | return; | 885 | return false; |
876 | } | 886 | } |
877 | 887 | ||
878 | intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); | 888 | intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); |
879 | if (!intel_lvds) { | 889 | if (!intel_lvds) { |
880 | return; | 890 | return false; |
881 | } | 891 | } |
882 | 892 | ||
883 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); | 893 | intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); |
884 | if (!intel_connector) { | 894 | if (!intel_connector) { |
885 | kfree(intel_lvds); | 895 | kfree(intel_lvds); |
886 | return; | 896 | return false; |
887 | } | 897 | } |
888 | 898 | ||
889 | if (!HAS_PCH_SPLIT(dev)) { | 899 | if (!HAS_PCH_SPLIT(dev)) { |
@@ -1026,7 +1036,7 @@ out: | |||
1026 | /* keep the LVDS connector */ | 1036 | /* keep the LVDS connector */ |
1027 | dev_priv->int_lvds_connector = connector; | 1037 | dev_priv->int_lvds_connector = connector; |
1028 | drm_sysfs_connector_add(connector); | 1038 | drm_sysfs_connector_add(connector); |
1029 | return; | 1039 | return true; |
1030 | 1040 | ||
1031 | failed: | 1041 | failed: |
1032 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); | 1042 | DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); |
@@ -1034,4 +1044,5 @@ failed: | |||
1034 | drm_encoder_cleanup(encoder); | 1044 | drm_encoder_cleanup(encoder); |
1035 | kfree(intel_lvds); | 1045 | kfree(intel_lvds); |
1036 | kfree(intel_connector); | 1046 | kfree(intel_connector); |
1047 | return false; | ||
1037 | } | 1048 | } |
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c index b83306f9244b..31cd7e33e820 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.c +++ b/drivers/gpu/drm/i915/intel_ringbuffer.c | |||
@@ -156,23 +156,25 @@ static int init_ring_common(struct drm_device *dev, | |||
156 | 156 | ||
157 | /* G45 ring initialization fails to reset head to zero */ | 157 | /* G45 ring initialization fails to reset head to zero */ |
158 | if (head != 0) { | 158 | if (head != 0) { |
159 | DRM_ERROR("%s head not reset to zero " | 159 | DRM_DEBUG_KMS("%s head not reset to zero " |
160 | "ctl %08x head %08x tail %08x start %08x\n", | 160 | "ctl %08x head %08x tail %08x start %08x\n", |
161 | ring->name, | 161 | ring->name, |
162 | I915_READ_CTL(ring), | 162 | I915_READ_CTL(ring), |
163 | I915_READ_HEAD(ring), | 163 | I915_READ_HEAD(ring), |
164 | I915_READ_TAIL(ring), | 164 | I915_READ_TAIL(ring), |
165 | I915_READ_START(ring)); | 165 | I915_READ_START(ring)); |
166 | 166 | ||
167 | I915_WRITE_HEAD(ring, 0); | 167 | I915_WRITE_HEAD(ring, 0); |
168 | 168 | ||
169 | DRM_ERROR("%s head forced to zero " | 169 | if (I915_READ_HEAD(ring) & HEAD_ADDR) { |
170 | "ctl %08x head %08x tail %08x start %08x\n", | 170 | DRM_ERROR("failed to set %s head to zero " |
171 | ring->name, | 171 | "ctl %08x head %08x tail %08x start %08x\n", |
172 | I915_READ_CTL(ring), | 172 | ring->name, |
173 | I915_READ_HEAD(ring), | 173 | I915_READ_CTL(ring), |
174 | I915_READ_TAIL(ring), | 174 | I915_READ_HEAD(ring), |
175 | I915_READ_START(ring)); | 175 | I915_READ_TAIL(ring), |
176 | I915_READ_START(ring)); | ||
177 | } | ||
176 | } | 178 | } |
177 | 179 | ||
178 | I915_WRITE_CTL(ring, | 180 | I915_WRITE_CTL(ring, |
@@ -694,20 +696,17 @@ int intel_wait_ring_buffer(struct drm_device *dev, | |||
694 | drm_i915_private_t *dev_priv = dev->dev_private; | 696 | drm_i915_private_t *dev_priv = dev->dev_private; |
695 | u32 head; | 697 | u32 head; |
696 | 698 | ||
697 | head = intel_read_status_page(ring, 4); | ||
698 | if (head) { | ||
699 | ring->head = head & HEAD_ADDR; | ||
700 | ring->space = ring->head - (ring->tail + 8); | ||
701 | if (ring->space < 0) | ||
702 | ring->space += ring->size; | ||
703 | if (ring->space >= n) | ||
704 | return 0; | ||
705 | } | ||
706 | |||
707 | trace_i915_ring_wait_begin (dev); | 699 | trace_i915_ring_wait_begin (dev); |
708 | end = jiffies + 3 * HZ; | 700 | end = jiffies + 3 * HZ; |
709 | do { | 701 | do { |
710 | ring->head = I915_READ_HEAD(ring) & HEAD_ADDR; | 702 | /* If the reported head position has wrapped or hasn't advanced, |
703 | * fallback to the slow and accurate path. | ||
704 | */ | ||
705 | head = intel_read_status_page(ring, 4); | ||
706 | if (head < ring->actual_head) | ||
707 | head = I915_READ_HEAD(ring); | ||
708 | ring->actual_head = head; | ||
709 | ring->head = head & HEAD_ADDR; | ||
711 | ring->space = ring->head - (ring->tail + 8); | 710 | ring->space = ring->head - (ring->tail + 8); |
712 | if (ring->space < 0) | 711 | if (ring->space < 0) |
713 | ring->space += ring->size; | 712 | ring->space += ring->size; |
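The ring-space computation that the rewritten wait loop keeps is modular arithmetic on byte offsets: the producer (tail) may never catch the consumer (head) exactly, hence the 8-byte slack that distinguishes a full ring from an empty one. A short standalone check with assumed values:

        #include <stdint.h>
        #include <stdio.h>

        int main(void)
        {
                int32_t size = 64 * 1024;   /* assumed 64 KiB ring */
                int32_t head = 0x0100;      /* GPU read offset */
                int32_t tail = 0xff00;      /* CPU write offset */

                /* same computation as intel_wait_ring_buffer() above */
                int32_t space = head - (tail + 8);
                if (space < 0)
                        space += size;

                printf("space = %d bytes\n", space);   /* 504 */
                return 0;
        }

The status-page read is cheap but can report a stale or wrapped value, which is exactly why the loop falls back to the slower uncached I915_READ_HEAD() whenever the reported head moves backwards.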
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h index 3126c2681983..d2cd0f1efeed 100644 --- a/drivers/gpu/drm/i915/intel_ringbuffer.h +++ b/drivers/gpu/drm/i915/intel_ringbuffer.h | |||
@@ -30,8 +30,9 @@ struct intel_ring_buffer { | |||
30 | struct drm_device *dev; | 30 | struct drm_device *dev; |
31 | struct drm_gem_object *gem_object; | 31 | struct drm_gem_object *gem_object; |
32 | 32 | ||
33 | unsigned int head; | 33 | u32 actual_head; |
34 | unsigned int tail; | 34 | u32 head; |
35 | u32 tail; | ||
35 | int space; | 36 | int space; |
36 | struct intel_hw_status_page status_page; | 37 | struct intel_hw_status_page status_page; |
37 | 38 | ||
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index de158b76bcd5..27e63abf2a73 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c | |||
@@ -107,7 +107,8 @@ struct intel_sdvo { | |||
107 | * This is set if we treat the device as HDMI, instead of DVI. | 107 | * This is set if we treat the device as HDMI, instead of DVI. |
108 | */ | 108 | */ |
109 | bool is_hdmi; | 109 | bool is_hdmi; |
110 | bool has_audio; | 110 | bool has_hdmi_monitor; |
111 | bool has_hdmi_audio; | ||
111 | 112 | ||
112 | /** | 113 | /** |
113 | * This is set if we detect output of sdvo device as LVDS and | 114 | * This is set if we detect output of sdvo device as LVDS and |
@@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1023 | if (!intel_sdvo_set_target_input(intel_sdvo)) | 1024 | if (!intel_sdvo_set_target_input(intel_sdvo)) |
1024 | return; | 1025 | return; |
1025 | 1026 | ||
1026 | if (intel_sdvo->is_hdmi && | 1027 | if (intel_sdvo->has_hdmi_monitor && |
1027 | !intel_sdvo_set_avi_infoframe(intel_sdvo)) | 1028 | !intel_sdvo_set_avi_infoframe(intel_sdvo)) |
1028 | return; | 1029 | return; |
1029 | 1030 | ||
@@ -1063,7 +1064,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder, | |||
1063 | } | 1064 | } |
1064 | if (intel_crtc->pipe == 1) | 1065 | if (intel_crtc->pipe == 1) |
1065 | sdvox |= SDVO_PIPE_B_SELECT; | 1066 | sdvox |= SDVO_PIPE_B_SELECT; |
1066 | if (intel_sdvo->has_audio) | 1067 | if (intel_sdvo->has_hdmi_audio) |
1067 | sdvox |= SDVO_AUDIO_ENABLE; | 1068 | sdvox |= SDVO_AUDIO_ENABLE; |
1068 | 1069 | ||
1069 | if (INTEL_INFO(dev)->gen >= 4) { | 1070 | if (INTEL_INFO(dev)->gen >= 4) { |
@@ -1295,55 +1296,14 @@ intel_sdvo_get_edid(struct drm_connector *connector) | |||
1295 | return drm_get_edid(connector, &sdvo->ddc); | 1296 | return drm_get_edid(connector, &sdvo->ddc); |
1296 | } | 1297 | } |
1297 | 1298 | ||
1298 | static struct drm_connector * | ||
1299 | intel_find_analog_connector(struct drm_device *dev) | ||
1300 | { | ||
1301 | struct drm_connector *connector; | ||
1302 | struct intel_sdvo *encoder; | ||
1303 | |||
1304 | list_for_each_entry(encoder, | ||
1305 | &dev->mode_config.encoder_list, | ||
1306 | base.base.head) { | ||
1307 | if (encoder->base.type == INTEL_OUTPUT_ANALOG) { | ||
1308 | list_for_each_entry(connector, | ||
1309 | &dev->mode_config.connector_list, | ||
1310 | head) { | ||
1311 | if (&encoder->base == | ||
1312 | intel_attached_encoder(connector)) | ||
1313 | return connector; | ||
1314 | } | ||
1315 | } | ||
1316 | } | ||
1317 | |||
1318 | return NULL; | ||
1319 | } | ||
1320 | |||
1321 | static int | ||
1322 | intel_analog_is_connected(struct drm_device *dev) | ||
1323 | { | ||
1324 | struct drm_connector *analog_connector; | ||
1325 | |||
1326 | analog_connector = intel_find_analog_connector(dev); | ||
1327 | if (!analog_connector) | ||
1328 | return false; | ||
1329 | |||
1330 | if (analog_connector->funcs->detect(analog_connector, false) == | ||
1331 | connector_status_disconnected) | ||
1332 | return false; | ||
1333 | |||
1334 | return true; | ||
1335 | } | ||
1336 | |||
1337 | /* Mac mini hack -- use the same DDC as the analog connector */ | 1299 | /* Mac mini hack -- use the same DDC as the analog connector */ |
1338 | static struct edid * | 1300 | static struct edid * |
1339 | intel_sdvo_get_analog_edid(struct drm_connector *connector) | 1301 | intel_sdvo_get_analog_edid(struct drm_connector *connector) |
1340 | { | 1302 | { |
1341 | struct drm_i915_private *dev_priv = connector->dev->dev_private; | 1303 | struct drm_i915_private *dev_priv = connector->dev->dev_private; |
1342 | 1304 | ||
1343 | if (!intel_analog_is_connected(connector->dev)) | 1305 | return drm_get_edid(connector, |
1344 | return NULL; | 1306 | &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); |
1345 | |||
1346 | return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter); | ||
1347 | } | 1307 | } |
1348 | 1308 | ||
1349 | enum drm_connector_status | 1309 | enum drm_connector_status |
@@ -1388,8 +1348,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | |||
1388 | /* DDC bus is shared, match EDID to connector type */ | 1348 | /* DDC bus is shared, match EDID to connector type */ |
1389 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { | 1349 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
1390 | status = connector_status_connected; | 1350 | status = connector_status_connected; |
1391 | intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); | 1351 | if (intel_sdvo->is_hdmi) { |
1392 | intel_sdvo->has_audio = drm_detect_monitor_audio(edid); | 1352 | intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid); |
1353 | intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid); | ||
1354 | } | ||
1393 | } | 1355 | } |
1394 | connector->display_info.raw_edid = NULL; | 1356 | connector->display_info.raw_edid = NULL; |
1395 | kfree(edid); | 1357 | kfree(edid); |
@@ -1398,7 +1360,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector) | |||
1398 | if (status == connector_status_connected) { | 1360 | if (status == connector_status_connected) { |
1399 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); | 1361 | struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); |
1400 | if (intel_sdvo_connector->force_audio) | 1362 | if (intel_sdvo_connector->force_audio) |
1401 | intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0; | 1363 | intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0; |
1402 | } | 1364 | } |
1403 | 1365 | ||
1404 | return status; | 1366 | return status; |
@@ -1415,10 +1377,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force) | |||
1415 | if (!intel_sdvo_write_cmd(intel_sdvo, | 1377 | if (!intel_sdvo_write_cmd(intel_sdvo, |
1416 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) | 1378 | SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) |
1417 | return connector_status_unknown; | 1379 | return connector_status_unknown; |
1418 | if (intel_sdvo->is_tv) { | 1380 | |
1419 | /* add 30ms delay when the output type is SDVO-TV */ | 1381 | /* add 30ms delay when the output type might be TV */ |
1382 | if (intel_sdvo->caps.output_flags & | ||
1383 | (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0)) | ||
1420 | mdelay(30); | 1384 | mdelay(30); |
1421 | } | 1385 | |
1422 | if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) | 1386 | if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) |
1423 | return connector_status_unknown; | 1387 | return connector_status_unknown; |
1424 | 1388 | ||
@@ -1472,8 +1436,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) | |||
1472 | edid = intel_sdvo_get_analog_edid(connector); | 1436 | edid = intel_sdvo_get_analog_edid(connector); |
1473 | 1437 | ||
1474 | if (edid != NULL) { | 1438 | if (edid != NULL) { |
1475 | drm_mode_connector_update_edid_property(connector, edid); | 1439 | if (edid->input & DRM_EDID_INPUT_DIGITAL) { |
1476 | drm_add_edid_modes(connector, edid); | 1440 | drm_mode_connector_update_edid_property(connector, edid); |
1441 | drm_add_edid_modes(connector, edid); | ||
1442 | } | ||
1477 | connector->display_info.raw_edid = NULL; | 1443 | connector->display_info.raw_edid = NULL; |
1478 | kfree(edid); | 1444 | kfree(edid); |
1479 | } | 1445 | } |
@@ -1713,12 +1679,12 @@ intel_sdvo_set_property(struct drm_connector *connector, | |||
1713 | 1679 | ||
1714 | intel_sdvo_connector->force_audio = val; | 1680 | intel_sdvo_connector->force_audio = val; |
1715 | 1681 | ||
1716 | if (val > 0 && intel_sdvo->has_audio) | 1682 | if (val > 0 && intel_sdvo->has_hdmi_audio) |
1717 | return 0; | 1683 | return 0; |
1718 | if (val < 0 && !intel_sdvo->has_audio) | 1684 | if (val < 0 && !intel_sdvo->has_hdmi_audio) |
1719 | return 0; | 1685 | return 0; |
1720 | 1686 | ||
1721 | intel_sdvo->has_audio = val > 0; | 1687 | intel_sdvo->has_hdmi_audio = val > 0; |
1722 | goto done; | 1688 | goto done; |
1723 | } | 1689 | } |
1724 | 1690 | ||
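The force_audio property handled above is a tri-state: negative forces audio off, zero means auto (trust the EDID probe cached in has_hdmi_audio), positive forces it on, and the early returns skip the expensive re-modeset when the forced value already matches. The same policy, restated compactly:

        #include <stdbool.h>

        /* val < 0: force off; val == 0: follow EDID; val > 0: force on */
        static bool resolve_hdmi_audio(int force_audio, bool edid_has_audio)
        {
                if (force_audio)
                        return force_audio > 0;
                return edid_has_audio;
        }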
@@ -1942,9 +1908,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv, | |||
1942 | speed = mapping->i2c_speed; | 1908 | speed = mapping->i2c_speed; |
1943 | } | 1909 | } |
1944 | 1910 | ||
1945 | sdvo->i2c = &dev_priv->gmbus[pin].adapter; | 1911 | if (pin < GMBUS_NUM_PORTS) { |
1946 | intel_gmbus_set_speed(sdvo->i2c, speed); | 1912 | sdvo->i2c = &dev_priv->gmbus[pin].adapter; |
1947 | intel_gmbus_force_bit(sdvo->i2c, true); | 1913 | intel_gmbus_set_speed(sdvo->i2c, speed); |
1914 | intel_gmbus_force_bit(sdvo->i2c, true); | ||
1915 | } else | ||
1916 | sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter; | ||
1948 | } | 1917 | } |
1949 | 1918 | ||
1950 | static bool | 1919 | static bool |
@@ -2070,6 +2039,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2070 | intel_sdvo_set_colorimetry(intel_sdvo, | 2039 | intel_sdvo_set_colorimetry(intel_sdvo, |
2071 | SDVO_COLORIMETRY_RGB256); | 2040 | SDVO_COLORIMETRY_RGB256); |
2072 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; | 2041 | connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; |
2042 | |||
2043 | intel_sdvo_add_hdmi_properties(intel_sdvo_connector); | ||
2073 | intel_sdvo->is_hdmi = true; | 2044 | intel_sdvo->is_hdmi = true; |
2074 | } | 2045 | } |
2075 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | | 2046 | intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | |
@@ -2077,8 +2048,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device) | |||
2077 | 2048 | ||
2078 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); | 2049 | intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); |
2079 | 2050 | ||
2080 | intel_sdvo_add_hdmi_properties(intel_sdvo_connector); | ||
2081 | |||
2082 | return true; | 2051 | return true; |
2083 | } | 2052 | } |
2084 | 2053 | ||