Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c          |    3
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c          |    5
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |    3
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          |  720
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c    |    8
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h          |    1
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c      |   44
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c        |   34
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c         |  159
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     |  165
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c          |  173
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h         |    3
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c         |   11
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c        |  133
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c    |    2
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c     |    4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  |  159
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h  |    3
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c        |   84
19 files changed, 1032 insertions(+), 682 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7a26f4dd21ae..e6800819bca8 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -767,6 +767,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
         case I915_PARAM_HAS_BLT:
                 value = HAS_BLT(dev);
                 break;
+        case I915_PARAM_HAS_COHERENT_RINGS:
+                value = 1;
+                break;
         default:
                 DRM_DEBUG_DRIVER("Unknown parameter %d\n",
                                  param->param);
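
The new case above is how userspace discovers that the rings are now coherent. As an illustration only (not part of the patch), a minimal probe via the DRM_IOCTL_I915_GETPARAM ioctl; it assumes the I915_PARAM_HAS_COHERENT_RINGS definition from the matching i915_drm.h uapi update, which falls outside this diffstat:

/* sketch: query the new parameter from userspace; older kernels
 * fail the ioctl, which we treat as "not supported".
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int has_coherent_rings(int fd)
{
        drm_i915_getparam_t gp;
        int value = 0;

        gp.param = I915_PARAM_HAS_COHERENT_RINGS;
        gp.value = &value;
        if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
                return 0;
        return value;
}

int main(void)
{
        int fd = open("/dev/dri/card0", O_RDWR);
        if (fd < 0)
                return 1;
        printf("coherent rings: %d\n", has_coherent_rings(fd));
        return 0;
}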
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd420760..f737960712e6 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
 module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
 
 unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
 
 unsigned int i915_lvds_downclock = 0;
 module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
@@ -150,7 +150,8 @@ static const struct intel_device_info intel_ironlake_d_info = {
 
 static const struct intel_device_info intel_ironlake_m_info = {
         .gen = 5, .is_mobile = 1,
-        .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+        .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+        .has_fbc = 0, /* disabled due to buggy hardware */
         .has_bsd_ring = 1,
 };
 
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285e..409826da3099 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1045,6 +1045,8 @@ void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
                                uint32_t read_domains,
                                uint32_t write_domain);
+int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+                              bool interruptible);
 int i915_gem_init_ringbuffer(struct drm_device *dev);
 void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
@@ -1321,6 +1323,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b5..275ec6ed43ae 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,8 +38,7 @@
 
 static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
 
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
-                                                  bool pipelined);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
 static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
 static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -547,6 +546,19 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
         struct drm_i915_gem_object *obj_priv;
         int ret = 0;
 
+        if (args->size == 0)
+                return 0;
+
+        if (!access_ok(VERIFY_WRITE,
+                       (char __user *)(uintptr_t)args->data_ptr,
+                       args->size))
+                return -EFAULT;
+
+        ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+                                       args->size);
+        if (ret)
+                return -EFAULT;
+
         ret = i915_mutex_lock_interruptible(dev);
         if (ret)
                 return ret;
@@ -564,23 +576,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                 goto out;
         }
 
-        if (args->size == 0)
-                goto out;
-
-        if (!access_ok(VERIFY_WRITE,
-                       (char __user *)(uintptr_t)args->data_ptr,
-                       args->size)) {
-                ret = -EFAULT;
-                goto out;
-        }
-
-        ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
-                                       args->size);
-        if (ret) {
-                ret = -EFAULT;
-                goto out;
-        }
-
         ret = i915_gem_object_get_pages_or_evict(obj);
         if (ret)
                 goto out;
@@ -981,7 +976,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         struct drm_i915_gem_pwrite *args = data;
         struct drm_gem_object *obj;
         struct drm_i915_gem_object *obj_priv;
-        int ret = 0;
+        int ret;
+
+        if (args->size == 0)
+                return 0;
+
+        if (!access_ok(VERIFY_READ,
+                       (char __user *)(uintptr_t)args->data_ptr,
+                       args->size))
+                return -EFAULT;
+
+        ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+                                      args->size);
+        if (ret)
+                return -EFAULT;
 
         ret = i915_mutex_lock_interruptible(dev);
         if (ret)
@@ -994,30 +1002,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         }
         obj_priv = to_intel_bo(obj);
 
-
         /* Bounds check destination. */
         if (args->offset > obj->size || args->size > obj->size - args->offset) {
                 ret = -EINVAL;
                 goto out;
         }
 
-        if (args->size == 0)
-                goto out;
-
-        if (!access_ok(VERIFY_READ,
-                       (char __user *)(uintptr_t)args->data_ptr,
-                       args->size)) {
-                ret = -EFAULT;
-                goto out;
-        }
-
-        ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
-                                      args->size);
-        if (ret) {
-                ret = -EFAULT;
-                goto out;
-        }
-
         /* We can only do the GTT pwrite on untiled buffers, as otherwise
          * it would end up going through the fenced access, and we'll get
          * different detiling behavior between reading and writing.
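
The pread and pwrite hunks above are the same refactor: the size check, access_ok() and prefaulting now run before struct_mutex is taken, so a page fault on the user buffer can never occur while the GEM lock is held. A condensed sketch of the resulting shape, reusing the helpers the patch itself calls (kernel context, hypothetical function name, not a standalone program):

/* sketch: validate and prefault the user buffer, then lock */
static int example_pwrite_prologue(struct drm_device *dev,
                                   struct drm_i915_gem_pwrite *args)
{
        char __user *ptr = (char __user *)(uintptr_t)args->data_ptr;
        int ret;

        if (args->size == 0)            /* nothing to copy */
                return 0;

        if (!access_ok(VERIFY_READ, ptr, args->size))
                return -EFAULT;         /* pointer outside userspace */

        if (fault_in_pages_readable(ptr, args->size))
                return -EFAULT;         /* prefault while unlocked */

        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
                return ret;
        /* ... object lookup, bounds check, copy ... */
        mutex_unlock(&dev->struct_mutex);
        return 0;
}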
@@ -2172,7 +2162,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
 static int i915_ring_idle(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
 {
-        if (list_empty(&ring->gpu_write_list))
+        if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
                 return 0;
 
         i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2180,7 @@ i915_gpu_idle(struct drm_device *dev)
         int ret;
 
         lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-                       list_empty(&dev_priv->render_ring.active_list) &&
-                       list_empty(&dev_priv->bsd_ring.active_list) &&
-                       list_empty(&dev_priv->blt_ring.active_list));
+                       list_empty(&dev_priv->mm.active_list));
         if (lists_empty)
                 return 0;
 
@@ -2605,7 +2593,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
         if (reg->gpu) {
                 int ret;
 
-                ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+                ret = i915_gem_object_flush_gpu_write_domain(obj);
                 if (ret)
                         return ret;
 
@@ -2753,8 +2741,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
 
 /** Flushes any GPU write domain for the object if it's dirty. */
 static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
-                                       bool pipelined)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
 {
         struct drm_device *dev = obj->dev;
         uint32_t old_write_domain;
@@ -2773,10 +2760,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
                             obj->read_domains,
                             old_write_domain);
 
-        if (pipelined)
-                return 0;
-
-        return i915_gem_object_wait_rendering(obj, true);
+        return 0;
 }
 
 /** Flushes the GTT write domain for the object if it's dirty. */
@@ -2837,18 +2821,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
         if (obj_priv->gtt_space == NULL)
                 return -EINVAL;
 
-        ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+        ret = i915_gem_object_flush_gpu_write_domain(obj);
         if (ret != 0)
                 return ret;
+        ret = i915_gem_object_wait_rendering(obj, true);
+        if (ret)
+                return ret;
 
         i915_gem_object_flush_cpu_write_domain(obj);
 
-        if (write) {
-                ret = i915_gem_object_wait_rendering(obj, true);
-                if (ret)
-                        return ret;
-        }
-
         old_write_domain = obj->write_domain;
         old_read_domains = obj->read_domains;
 
@@ -2886,7 +2867,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
         if (obj_priv->gtt_space == NULL)
                 return -EINVAL;
 
-        ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+        ret = i915_gem_object_flush_gpu_write_domain(obj);
         if (ret)
                 return ret;
 
@@ -2909,6 +2890,20 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
         return 0;
 }
 
+int
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+                          bool interruptible)
+{
+        if (!obj->active)
+                return 0;
+
+        if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+                i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+                                    0, obj->base.write_domain);
+
+        return i915_gem_object_wait_rendering(&obj->base, interruptible);
+}
+
 /**
  * Moves a single object to the CPU read, and possibly write domain.
  *
@@ -2921,9 +2916,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
         uint32_t old_write_domain, old_read_domains;
         int ret;
 
-        ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+        ret = i915_gem_object_flush_gpu_write_domain(obj);
         if (ret != 0)
                 return ret;
+        ret = i915_gem_object_wait_rendering(obj, true);
+        if (ret)
+                return ret;
 
         i915_gem_object_flush_gtt_write_domain(obj);
 
@@ -2932,12 +2930,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
          */
         i915_gem_object_set_to_full_cpu_read_domain(obj);
 
-        if (write) {
-                ret = i915_gem_object_wait_rendering(obj, true);
-                if (ret)
-                        return ret;
-        }
-
         old_write_domain = obj->write_domain;
         old_read_domains = obj->read_domains;
 
@@ -3108,7 +3100,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
          * write domain
          */
         if (obj->write_domain &&
-            obj->write_domain != obj->pending_read_domains) {
+            (obj->write_domain != obj->pending_read_domains ||
+             obj_priv->ring != ring)) {
                 flush_domains |= obj->write_domain;
                 invalidate_domains |=
                         obj->pending_read_domains & ~obj->write_domain;
@@ -3201,9 +3194,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
         if (offset == 0 && size == obj->size)
                 return i915_gem_object_set_to_cpu_domain(obj, 0);
 
-        ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+        ret = i915_gem_object_flush_gpu_write_domain(obj);
         if (ret != 0)
                 return ret;
+        ret = i915_gem_object_wait_rendering(obj, true);
+        if (ret)
+                return ret;
+
         i915_gem_object_flush_gtt_write_domain(obj);
 
         /* If we're already fully in the CPU read domain, we're done. */
@@ -3250,192 +3247,230 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
         return 0;
 }
 
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
-static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
-                             struct drm_file *file_priv,
-                             struct drm_i915_gem_exec_object2 *entry)
-{
-        struct drm_device *dev = obj->base.dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
-        struct drm_i915_gem_relocation_entry __user *user_relocs;
-        struct drm_gem_object *target_obj = NULL;
-        uint32_t target_handle = 0;
-        int i, ret = 0;
-
-        user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
-        for (i = 0; i < entry->relocation_count; i++) {
-                struct drm_i915_gem_relocation_entry reloc;
-                uint32_t target_offset;
-
-                if (__copy_from_user_inatomic(&reloc,
-                                              user_relocs+i,
-                                              sizeof(reloc))) {
-                        ret = -EFAULT;
-                        break;
-                }
-
-                if (reloc.target_handle != target_handle) {
-                        drm_gem_object_unreference(target_obj);
-
-                        target_obj = drm_gem_object_lookup(dev, file_priv,
-                                                           reloc.target_handle);
-                        if (target_obj == NULL) {
-                                ret = -ENOENT;
-                                break;
-                        }
-
-                        target_handle = reloc.target_handle;
-                }
-                target_offset = to_intel_bo(target_obj)->gtt_offset;
-
-#if WATCH_RELOC
-                DRM_INFO("%s: obj %p offset %08x target %d "
-                         "read %08x write %08x gtt %08x "
-                         "presumed %08x delta %08x\n",
-                         __func__,
-                         obj,
-                         (int) reloc.offset,
-                         (int) reloc.target_handle,
-                         (int) reloc.read_domains,
-                         (int) reloc.write_domain,
-                         (int) target_offset,
-                         (int) reloc.presumed_offset,
-                         reloc.delta);
-#endif
-
-                /* The target buffer should have appeared before us in the
-                 * exec_object list, so it should have a GTT space bound by now.
-                 */
-                if (target_offset == 0) {
-                        DRM_ERROR("No GTT space found for object %d\n",
-                                  reloc.target_handle);
-                        ret = -EINVAL;
-                        break;
-                }
-
-                /* Validate that the target is in a valid r/w GPU domain */
-                if (reloc.write_domain & (reloc.write_domain - 1)) {
-                        DRM_ERROR("reloc with multiple write domains: "
-                                  "obj %p target %d offset %d "
-                                  "read %08x write %08x",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.offset,
-                                  reloc.read_domains,
-                                  reloc.write_domain);
-                        ret = -EINVAL;
-                        break;
-                }
-                if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
-                    reloc.read_domains & I915_GEM_DOMAIN_CPU) {
-                        DRM_ERROR("reloc with read/write CPU domains: "
-                                  "obj %p target %d offset %d "
-                                  "read %08x write %08x",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.offset,
-                                  reloc.read_domains,
-                                  reloc.write_domain);
-                        ret = -EINVAL;
-                        break;
-                }
-                if (reloc.write_domain && target_obj->pending_write_domain &&
-                    reloc.write_domain != target_obj->pending_write_domain) {
-                        DRM_ERROR("Write domain conflict: "
-                                  "obj %p target %d offset %d "
-                                  "new %08x old %08x\n",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.offset,
-                                  reloc.write_domain,
-                                  target_obj->pending_write_domain);
-                        ret = -EINVAL;
-                        break;
-                }
-
-                target_obj->pending_read_domains |= reloc.read_domains;
-                target_obj->pending_write_domain |= reloc.write_domain;
-
-                /* If the relocation already has the right value in it, no
-                 * more work needs to be done.
-                 */
-                if (target_offset == reloc.presumed_offset)
-                        continue;
-
-                /* Check that the relocation address is valid... */
-                if (reloc.offset > obj->base.size - 4) {
-                        DRM_ERROR("Relocation beyond object bounds: "
-                                  "obj %p target %d offset %d size %d.\n",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.offset, (int) obj->base.size);
-                        ret = -EINVAL;
-                        break;
-                }
-                if (reloc.offset & 3) {
-                        DRM_ERROR("Relocation not 4-byte aligned: "
-                                  "obj %p target %d offset %d.\n",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.offset);
-                        ret = -EINVAL;
-                        break;
-                }
-
-                /* and points to somewhere within the target object. */
-                if (reloc.delta >= target_obj->size) {
-                        DRM_ERROR("Relocation beyond target object bounds: "
-                                  "obj %p target %d delta %d size %d.\n",
-                                  obj, reloc.target_handle,
-                                  (int) reloc.delta, (int) target_obj->size);
-                        ret = -EINVAL;
-                        break;
-                }
-
-                reloc.delta += target_offset;
-                if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
-                        uint32_t page_offset = reloc.offset & ~PAGE_MASK;
-                        char *vaddr;
-
-                        vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
-                        *(uint32_t *)(vaddr + page_offset) = reloc.delta;
-                        kunmap_atomic(vaddr);
-                } else {
-                        uint32_t __iomem *reloc_entry;
-                        void __iomem *reloc_page;
-
-                        ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
-                        if (ret)
-                                break;
-
-                        /* Map the page containing the relocation we're going to perform. */
-                        reloc.offset += obj->gtt_offset;
-                        reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
-                                                              reloc.offset & PAGE_MASK);
-                        reloc_entry = (uint32_t __iomem *)
-                                (reloc_page + (reloc.offset & ~PAGE_MASK));
-                        iowrite32(reloc.delta, reloc_entry);
-                        io_mapping_unmap_atomic(reloc_page);
-                }
-
-                /* and update the user's relocation entry */
-                reloc.presumed_offset = target_offset;
-                if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
-                                            &reloc.presumed_offset,
-                                            sizeof(reloc.presumed_offset))) {
-                        ret = -EFAULT;
-                        break;
-                }
-        }
-
-        drm_gem_object_unreference(target_obj);
-        return ret;
-}
-
-static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
-                        struct drm_file *file,
-                        struct drm_gem_object **object_list,
-                        struct drm_i915_gem_exec_object2 *exec_list,
-                        int count)
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+                                   struct drm_file *file_priv,
+                                   struct drm_i915_gem_exec_object2 *entry,
+                                   struct drm_i915_gem_relocation_entry *reloc)
+{
+        struct drm_device *dev = obj->base.dev;
+        struct drm_gem_object *target_obj;
+        uint32_t target_offset;
+        int ret = -EINVAL;
+
+        target_obj = drm_gem_object_lookup(dev, file_priv,
+                                           reloc->target_handle);
+        if (target_obj == NULL)
+                return -ENOENT;
+
+        target_offset = to_intel_bo(target_obj)->gtt_offset;
+
+#if WATCH_RELOC
+        DRM_INFO("%s: obj %p offset %08x target %d "
+                 "read %08x write %08x gtt %08x "
+                 "presumed %08x delta %08x\n",
+                 __func__,
+                 obj,
+                 (int) reloc->offset,
+                 (int) reloc->target_handle,
+                 (int) reloc->read_domains,
+                 (int) reloc->write_domain,
+                 (int) target_offset,
+                 (int) reloc->presumed_offset,
+                 reloc->delta);
+#endif
+
+        /* The target buffer should have appeared before us in the
+         * exec_object list, so it should have a GTT space bound by now.
+         */
+        if (target_offset == 0) {
+                DRM_ERROR("No GTT space found for object %d\n",
+                          reloc->target_handle);
+                goto err;
+        }
+
+        /* Validate that the target is in a valid r/w GPU domain */
+        if (reloc->write_domain & (reloc->write_domain - 1)) {
+                DRM_ERROR("reloc with multiple write domains: "
+                          "obj %p target %d offset %d "
+                          "read %08x write %08x",
+                          obj, reloc->target_handle,
+                          (int) reloc->offset,
+                          reloc->read_domains,
+                          reloc->write_domain);
+                goto err;
+        }
+        if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+            reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+                DRM_ERROR("reloc with read/write CPU domains: "
+                          "obj %p target %d offset %d "
+                          "read %08x write %08x",
+                          obj, reloc->target_handle,
+                          (int) reloc->offset,
+                          reloc->read_domains,
+                          reloc->write_domain);
+                goto err;
+        }
+        if (reloc->write_domain && target_obj->pending_write_domain &&
+            reloc->write_domain != target_obj->pending_write_domain) {
+                DRM_ERROR("Write domain conflict: "
+                          "obj %p target %d offset %d "
+                          "new %08x old %08x\n",
+                          obj, reloc->target_handle,
+                          (int) reloc->offset,
+                          reloc->write_domain,
+                          target_obj->pending_write_domain);
+                goto err;
+        }
+
+        target_obj->pending_read_domains |= reloc->read_domains;
+        target_obj->pending_write_domain |= reloc->write_domain;
+
+        /* If the relocation already has the right value in it, no
+         * more work needs to be done.
+         */
+        if (target_offset == reloc->presumed_offset)
+                goto out;
+
+        /* Check that the relocation address is valid... */
+        if (reloc->offset > obj->base.size - 4) {
+                DRM_ERROR("Relocation beyond object bounds: "
+                          "obj %p target %d offset %d size %d.\n",
+                          obj, reloc->target_handle,
+                          (int) reloc->offset,
+                          (int) obj->base.size);
+                goto err;
+        }
+        if (reloc->offset & 3) {
+                DRM_ERROR("Relocation not 4-byte aligned: "
+                          "obj %p target %d offset %d.\n",
+                          obj, reloc->target_handle,
+                          (int) reloc->offset);
+                goto err;
+        }
+
+        /* and points to somewhere within the target object. */
+        if (reloc->delta >= target_obj->size) {
+                DRM_ERROR("Relocation beyond target object bounds: "
+                          "obj %p target %d delta %d size %d.\n",
+                          obj, reloc->target_handle,
+                          (int) reloc->delta,
+                          (int) target_obj->size);
+                goto err;
+        }
+
+        reloc->delta += target_offset;
+        if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+                uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+                char *vaddr;
+
+                vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+                *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+                kunmap_atomic(vaddr);
+        } else {
+                struct drm_i915_private *dev_priv = dev->dev_private;
+                uint32_t __iomem *reloc_entry;
+                void __iomem *reloc_page;
+
+                ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+                if (ret)
+                        goto err;
+
+                /* Map the page containing the relocation we're going to perform. */
+                reloc->offset += obj->gtt_offset;
+                reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+                                                      reloc->offset & PAGE_MASK);
+                reloc_entry = (uint32_t __iomem *)
+                        (reloc_page + (reloc->offset & ~PAGE_MASK));
+                iowrite32(reloc->delta, reloc_entry);
+                io_mapping_unmap_atomic(reloc_page);
+        }
+
+        /* and update the user's relocation entry */
+        reloc->presumed_offset = target_offset;
+
+out:
+        ret = 0;
+err:
+        drm_gem_object_unreference(target_obj);
+        return ret;
+}
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+                                    struct drm_file *file_priv,
+                                    struct drm_i915_gem_exec_object2 *entry)
+{
+        struct drm_i915_gem_relocation_entry __user *user_relocs;
+        int i, ret;
+
+        user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+        for (i = 0; i < entry->relocation_count; i++) {
+                struct drm_i915_gem_relocation_entry reloc;
+
+                if (__copy_from_user_inatomic(&reloc,
+                                              user_relocs+i,
+                                              sizeof(reloc)))
+                        return -EFAULT;
+
+                ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+                if (ret)
+                        return ret;
+
+                if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+                                            &reloc.presumed_offset,
+                                            sizeof(reloc.presumed_offset)))
+                        return -EFAULT;
+        }
+
+        return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+                                         struct drm_file *file_priv,
+                                         struct drm_i915_gem_exec_object2 *entry,
+                                         struct drm_i915_gem_relocation_entry *relocs)
+{
+        int i, ret;
+
+        for (i = 0; i < entry->relocation_count; i++) {
+                ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+                             struct drm_file *file,
+                             struct drm_gem_object **object_list,
+                             struct drm_i915_gem_exec_object2 *exec_list,
+                             int count)
+{
+        int i, ret;
+
+        for (i = 0; i < count; i++) {
+                struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+                obj->base.pending_read_domains = 0;
+                obj->base.pending_write_domain = 0;
+                ret = i915_gem_execbuffer_relocate_object(obj, file,
+                                                          &exec_list[i]);
+                if (ret)
+                        return ret;
+        }
+
+        return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct drm_device *dev,
+                            struct drm_file *file,
+                            struct drm_gem_object **object_list,
+                            struct drm_i915_gem_exec_object2 *exec_list,
+                            int count)
 {
         struct drm_i915_private *dev_priv = dev->dev_private;
         int ret, i, retry;
@@ -3497,6 +3532,133 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
         return 0;
 }
 
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+                                  struct drm_file *file,
+                                  struct drm_gem_object **object_list,
+                                  struct drm_i915_gem_exec_object2 *exec_list,
+                                  int count)
+{
+        struct drm_i915_gem_relocation_entry *reloc;
+        int i, total, ret;
+
+        for (i = 0; i < count; i++) {
+                struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+                obj->in_execbuffer = false;
+        }
+
+        mutex_unlock(&dev->struct_mutex);
+
+        total = 0;
+        for (i = 0; i < count; i++)
+                total += exec_list[i].relocation_count;
+
+        reloc = drm_malloc_ab(total, sizeof(*reloc));
+        if (reloc == NULL) {
+                mutex_lock(&dev->struct_mutex);
+                return -ENOMEM;
+        }
+
+        total = 0;
+        for (i = 0; i < count; i++) {
+                struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+                user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+                if (copy_from_user(reloc+total, user_relocs,
+                                   exec_list[i].relocation_count *
+                                   sizeof(*reloc))) {
+                        ret = -EFAULT;
+                        mutex_lock(&dev->struct_mutex);
+                        goto err;
+                }
+
+                total += exec_list[i].relocation_count;
+        }
+
+        ret = i915_mutex_lock_interruptible(dev);
+        if (ret) {
+                mutex_lock(&dev->struct_mutex);
+                goto err;
+        }
+
+        ret = i915_gem_execbuffer_reserve(dev, file,
+                                          object_list, exec_list,
+                                          count);
+        if (ret)
+                goto err;
+
+        total = 0;
+        for (i = 0; i < count; i++) {
+                struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+                obj->base.pending_read_domains = 0;
+                obj->base.pending_write_domain = 0;
+                ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+                                                               &exec_list[i],
+                                                               reloc + total);
+                if (ret)
+                        goto err;
+
+                total += exec_list[i].relocation_count;
+        }
+
+        /* Leave the user relocations as are, this is the painfully slow path,
+         * and we want to avoid the complication of dropping the lock whilst
+         * having buffers reserved in the aperture and so causing spurious
+         * ENOSPC for random operations.
+         */
+
+err:
+        drm_free_large(reloc);
+        return ret;
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+                                struct drm_file *file,
+                                struct intel_ring_buffer *ring,
+                                struct drm_gem_object **objects,
+                                int count)
+{
+        struct drm_i915_private *dev_priv = dev->dev_private;
+        int ret, i;
+
+        /* Zero the global flush/invalidate flags. These
+         * will be modified as new domains are computed
+         * for each object
+         */
+        dev->invalidate_domains = 0;
+        dev->flush_domains = 0;
+        dev_priv->mm.flush_rings = 0;
+        for (i = 0; i < count; i++)
+                i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+        if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+                         __func__,
+                         dev->invalidate_domains,
+                         dev->flush_domains);
+#endif
+                i915_gem_flush(dev, file,
+                               dev->invalidate_domains,
+                               dev->flush_domains,
+                               dev_priv->mm.flush_rings);
+        }
+
+        for (i = 0; i < count; i++) {
+                struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+                /* XXX replace with semaphores */
+                if (obj->ring && ring != obj->ring) {
+                        ret = i915_gem_object_wait_rendering(&obj->base, true);
+                        if (ret)
+                                return ret;
+                }
+        }
+
+        return 0;
+}
+
 /* Throttle our rendering by waiting until the ring has completed our requests
  * emitted over 20 msec ago.
  *
@@ -3580,8 +3742,15 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
 
         for (i = 0; i < count; i++) {
                 char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
-                size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
+                int length; /* limited by fault_in_pages_readable() */
+
+                /* First check for malicious input causing overflow */
+                if (exec[i].relocation_count >
+                    INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+                        return -EINVAL;
 
+                length = exec[i].relocation_count *
+                        sizeof(struct drm_i915_gem_relocation_entry);
                 if (!access_ok(VERIFY_READ, ptr, length))
                         return -EFAULT;
 
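
The INT_MAX guard matters because relocation_count is multiplied by sizeof(struct drm_i915_gem_relocation_entry); a crafted count can wrap the product, so access_ok() approves a tiny region while later copies run far beyond it. A self-contained illustration of the guard (struct fake_reloc is a hypothetical stand-in for the 32-byte relocation entry):

#include <limits.h>
#include <stddef.h>
#include <stdint.h>

struct fake_reloc { uint64_t q[4]; };   /* 32 bytes, like the real entry */

/* sketch: returns 0 and stores the byte count, or -1 on overflow
 * (the kernel path returns -EINVAL in that case).
 */
static int reloc_bytes(unsigned int count, size_t *out)
{
        if (count > INT_MAX / sizeof(struct fake_reloc))
                return -1;
        *out = (size_t)count * sizeof(struct fake_reloc);
        return 0;
}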
@@ -3724,18 +3893,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
         }
 
         /* Move the objects en-masse into the GTT, evicting if necessary. */
-        ret = i915_gem_execbuffer_pin(dev, file,
-                                      object_list, exec_list,
-                                      args->buffer_count);
+        ret = i915_gem_execbuffer_reserve(dev, file,
+                                          object_list, exec_list,
+                                          args->buffer_count);
         if (ret)
                 goto err;
 
         /* The objects are in their final locations, apply the relocations. */
-        for (i = 0; i < args->buffer_count; i++) {
-                struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
-                obj->base.pending_read_domains = 0;
-                obj->base.pending_write_domain = 0;
-                ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+        ret = i915_gem_execbuffer_relocate(dev, file,
+                                           object_list, exec_list,
+                                           args->buffer_count);
+        if (ret) {
+                if (ret == -EFAULT) {
+                        ret = i915_gem_execbuffer_relocate_slow(dev, file,
+                                                                object_list,
+                                                                exec_list,
+                                                                args->buffer_count);
+                        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+                }
                 if (ret)
                         goto err;
         }
@@ -3757,33 +3932,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
                 goto err;
         }
 
-        /* Zero the global flush/invalidate flags. These
-         * will be modified as new domains are computed
-         * for each object
-         */
-        dev->invalidate_domains = 0;
-        dev->flush_domains = 0;
-        dev_priv->mm.flush_rings = 0;
-
-        for (i = 0; i < args->buffer_count; i++) {
-                struct drm_gem_object *obj = object_list[i];
-
-                /* Compute new gpu domains and update invalidate/flush */
-                i915_gem_object_set_to_gpu_domain(obj, ring);
-        }
-
-        if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
-                DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
-                         __func__,
-                         dev->invalidate_domains,
-                         dev->flush_domains);
-#endif
-                i915_gem_flush(dev, file,
-                               dev->invalidate_domains,
-                               dev->flush_domains,
-                               dev_priv->mm.flush_rings);
-        }
+        ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+                                              object_list, args->buffer_count);
+        if (ret)
+                goto err;
 
         for (i = 0; i < args->buffer_count; i++) {
                 struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4195,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
                 alignment = i915_gem_get_gtt_alignment(obj);
         if (obj_priv->gtt_offset & (alignment - 1)) {
                 WARN(obj_priv->pin_count,
-                     "bo is already pinned with incorrect alignment:"
-                     " offset=%x, req.alignment=%x\n",
+                     "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
                      obj_priv->gtt_offset, alignment);
                 ret = i915_gem_object_unbind(obj);
                 if (ret)
@@ -4223,10 +4374,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
          * use this buffer rather sooner than later, so issuing the required
          * flush earlier is beneficial.
          */
-        if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+        if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
                 i915_gem_flush_ring(dev, file_priv,
                                     obj_priv->ring,
                                     0, obj->write_domain);
+        } else if (obj_priv->ring->outstanding_lazy_request) {
+                /* This ring is not being cleared by active usage,
+                 * so emit a request to do so.
+                 */
+                u32 seqno = i915_add_request(dev,
+                                             NULL, NULL,
+                                             obj_priv->ring);
+                if (seqno == 0)
+                        ret = -ENOMEM;
+        }
 
         /* Update the active list for the hardware's current position.
          * Otherwise this only updates on a delayed timer or when irqs
@@ -4856,17 +5017,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
                          struct drm_file *file_priv)
 {
         struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-        void *obj_addr;
-        int ret;
-        char __user *user_data;
+        void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+        char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
 
-        user_data = (char __user *) (uintptr_t) args->data_ptr;
-        obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+        DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
 
-        DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
-        ret = copy_from_user(obj_addr, user_data, args->size);
-        if (ret)
-                return -EFAULT;
+        if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+                unsigned long unwritten;
+
+                /* The physical object once assigned is fixed for the lifetime
+                 * of the obj, so we can safely drop the lock and continue
+                 * to access vaddr.
+                 */
+                mutex_unlock(&dev->struct_mutex);
+                unwritten = copy_from_user(vaddr, user_data, args->size);
+                mutex_lock(&dev->struct_mutex);
+                if (unwritten)
+                        return -EFAULT;
+        }
 
         drm_agp_chipset_flush(dev);
         return 0;
@@ -4900,9 +5068,7 @@ i915_gpu_is_active(struct drm_device *dev)
         int lists_empty;
 
         lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
-                      list_empty(&dev_priv->render_ring.active_list) &&
-                      list_empty(&dev_priv->bsd_ring.active_list) &&
-                      list_empty(&dev_priv->blt_ring.active_list);
+                      list_empty(&dev_priv->mm.active_list);
 
         return !lists_empty;
 }
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013f53fa..d8ae7d1d0cc6 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
         lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                        list_empty(&dev_priv->mm.flushing_list) &&
-                       list_empty(&dev_priv->render_ring.active_list) &&
-                       list_empty(&dev_priv->bsd_ring.active_list) &&
-                       list_empty(&dev_priv->blt_ring.active_list));
+                       list_empty(&dev_priv->mm.active_list));
         if (lists_empty)
                 return -ENOSPC;
 
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
 
         lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
                        list_empty(&dev_priv->mm.flushing_list) &&
-                       list_empty(&dev_priv->render_ring.active_list) &&
-                       list_empty(&dev_priv->bsd_ring.active_list) &&
-                       list_empty(&dev_priv->blt_ring.active_list));
+                       list_empty(&dev_priv->mm.active_list));
         BUG_ON(!lists_empty);
 
         return 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 25ed911a3112..878fc766a12c 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3033,6 +3033,7 @@
 #define  TRANS_DP_10BPC         (1<<9)
 #define  TRANS_DP_6BPC          (2<<9)
 #define  TRANS_DP_12BPC         (3<<9)
+#define  TRANS_DP_BPC_MASK      (3<<9)
 #define  TRANS_DP_VSYNC_ACTIVE_HIGH     (1<<4)
 #define  TRANS_DP_VSYNC_ACTIVE_LOW      0
 #define  TRANS_DP_HSYNC_ACTIVE_HIGH     (1<<3)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d2d959..42729d25da58 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
         if (drm_core_check_feature(dev, DRIVER_MODESET))
                 return;
 
+        /* Cursor state */
+        dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+        dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+        dev_priv->saveCURABASE = I915_READ(CURABASE);
+        dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+        dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+        dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+        if (IS_GEN2(dev))
+                dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
         if (HAS_PCH_SPLIT(dev)) {
                 dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
                 dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -529,6 +539,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
         I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
         I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
 
+        /* Cursor state */
+        I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+        I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+        I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+        I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+        I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+        I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+        if (IS_GEN2(dev))
+                I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
         return;
 }
 
@@ -543,16 +563,6 @@ void i915_save_display(struct drm_device *dev)
         /* Don't save them in KMS mode */
         i915_save_modeset_reg(dev);
 
-        /* Cursor state */
-        dev_priv->saveCURACNTR = I915_READ(CURACNTR);
-        dev_priv->saveCURAPOS = I915_READ(CURAPOS);
-        dev_priv->saveCURABASE = I915_READ(CURABASE);
-        dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
-        dev_priv->saveCURBPOS = I915_READ(CURBPOS);
-        dev_priv->saveCURBBASE = I915_READ(CURBBASE);
-        if (IS_GEN2(dev))
-                dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
         /* CRT state */
         if (HAS_PCH_SPLIT(dev)) {
                 dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -657,16 +667,6 @@ void i915_restore_display(struct drm_device *dev)
         /* Don't restore them in KMS mode */
         i915_restore_modeset_reg(dev);
 
-        /* Cursor state */
-        I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
-        I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
-        I915_WRITE(CURABASE, dev_priv->saveCURABASE);
-        I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
-        I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
-        I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
-        if (IS_GEN2(dev))
-                I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
         /* CRT state */
         if (HAS_PCH_SPLIT(dev))
                 I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
         /* Clock gating state */
         intel_init_clock_gating(dev);
 
-        if (HAS_PCH_SPLIT(dev))
+        if (HAS_PCH_SPLIT(dev)) {
                 ironlake_enable_drps(dev);
+                intel_init_emon(dev);
+        }
 
         /* Cache mode state */
         I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 65c88f9ba12c..2cb8e0b9f1ee 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -190,37 +190,6 @@ out:
         kfree(output.pointer);
 }
 
-static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
-{
-        return 0;
-}
-
-static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
-                                 enum vga_switcheroo_state state)
-{
-        return 0;
-}
-
-static int intel_dsm_init(void)
-{
-        return 0;
-}
-
-static int intel_dsm_get_client_id(struct pci_dev *pdev)
-{
-        if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
-                return VGA_SWITCHEROO_IGD;
-        else
-                return VGA_SWITCHEROO_DIS;
-}
-
-static struct vga_switcheroo_handler intel_dsm_handler = {
-        .switchto = intel_dsm_switchto,
-        .power_state = intel_dsm_power_state,
-        .init = intel_dsm_init,
-        .get_client_id = intel_dsm_get_client_id,
-};
-
 static bool intel_dsm_pci_probe(struct pci_dev *pdev)
 {
         acpi_handle dhandle, intel_handle;
@@ -276,11 +245,8 @@ void intel_register_dsm_handler(void)
 {
         if (!intel_dsm_detect())
                 return;
-
-        vga_switcheroo_register_handler(&intel_dsm_handler);
 }
 
 void intel_unregister_dsm_handler(void)
 {
-        vga_switcheroo_unregister_handler();
 }
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c55c77043357..8df574316063 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -34,6 +34,25 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 |        \
+                           ADPA_CRT_HOTPLUG_WARMUP_10MS |       \
+                           ADPA_CRT_HOTPLUG_SAMPLE_4S |         \
+                           ADPA_CRT_HOTPLUG_VOLTAGE_50 |        \
+                           ADPA_CRT_HOTPLUG_VOLREF_325MV |      \
+                           ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+        struct intel_encoder base;
+        bool force_hotplug_required;
+};
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+        return container_of(intel_attached_encoder(connector),
+                            struct intel_crt, base);
+}
+
 static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
 {
         struct drm_device *dev = encoder->dev;
@@ -129,7 +148,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
                            dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
         }
 
-        adpa = 0;
+        adpa = ADPA_HOTPLUG_BITS;
         if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
                 adpa |= ADPA_HSYNC_ACTIVE_HIGH;
         if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -157,53 +176,44 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 {
         struct drm_device *dev = connector->dev;
+        struct intel_crt *crt = intel_attached_crt(connector);
         struct drm_i915_private *dev_priv = dev->dev_private;
-        u32 adpa, temp;
+        u32 adpa;
         bool ret;
-        bool turn_off_dac = false;
 
-        temp = adpa = I915_READ(PCH_ADPA);
-
-        if (HAS_PCH_SPLIT(dev))
-                turn_off_dac = true;
-
-        adpa &= ~ADPA_CRT_HOTPLUG_MASK;
-        if (turn_off_dac)
-                adpa &= ~ADPA_DAC_ENABLE;
-
-        /* disable HPD first */
-        I915_WRITE(PCH_ADPA, adpa);
-        (void)I915_READ(PCH_ADPA);
-
-        adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
-                 ADPA_CRT_HOTPLUG_WARMUP_10MS |
-                 ADPA_CRT_HOTPLUG_SAMPLE_4S |
-                 ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
-                 ADPA_CRT_HOTPLUG_VOLREF_325MV |
-                 ADPA_CRT_HOTPLUG_ENABLE |
-                 ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
-
-        DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
-        I915_WRITE(PCH_ADPA, adpa);
-
-        if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
-                     1000))
-                DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
-
-        if (turn_off_dac) {
-                /* Make sure hotplug is enabled */
-                I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
-                (void)I915_READ(PCH_ADPA);
+        /* The first time through, trigger an explicit detection cycle */
+        if (crt->force_hotplug_required) {
+                bool turn_off_dac = HAS_PCH_SPLIT(dev);
+                u32 save_adpa;
+
+                crt->force_hotplug_required = 0;
+
+                save_adpa = adpa = I915_READ(PCH_ADPA);
+                DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+                adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+                if (turn_off_dac)
+                        adpa &= ~ADPA_DAC_ENABLE;
+
+                I915_WRITE(PCH_ADPA, adpa);
+
+                if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+                             1000))
+                        DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+                if (turn_off_dac) {
+                        I915_WRITE(PCH_ADPA, save_adpa);
+                        POSTING_READ(PCH_ADPA);
+                }
         }
 
         /* Check the status to see if both blue and green are on now */
         adpa = I915_READ(PCH_ADPA);
-        adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
-        if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) ||
-            (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO))
+        if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
                 ret = true;
         else
                 ret = false;
+        DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
 
         return ret;
 }
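
The rewritten detect path is a write-then-poll handshake: set ADPA_CRT_HOTPLUG_FORCE_TRIGGER, then wait up to a second for the hardware to clear the bit before sampling the monitor status. The driver does this with its wait_for() macro against jiffies; a generic, userspace-flavoured sketch of the same shape (hypothetical helper, not driver code):

#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* sketch: poll until *reg clears `bit` or timeout_ms elapses */
static bool poll_until_clear(volatile uint32_t *reg, uint32_t bit,
                             unsigned int timeout_ms)
{
        struct timespec one_ms = { 0, 1000000 };

        while (timeout_ms--) {
                if ((*reg & bit) == 0)
                        return true;    /* hardware acked the trigger */
                nanosleep(&one_ms, NULL);
        }
        return false;                   /* caller logs the timeout */
}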
@@ -277,13 +287,12 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
         return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
 }
 
-static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
+static bool intel_crt_detect_ddc(struct intel_crt *crt)
 {
-        struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
-        struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+        struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
 
         /* CRT should always be at 0, but check anyway */
-        if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
+        if (crt->base.type != INTEL_OUTPUT_ANALOG)
                 return false;
 
         if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
@@ -291,7 +300,7 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
                 return true;
         }
 
-        if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
+        if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
                 DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
                 return true;
         }
@@ -300,9 +309,9 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
 }
 
 static enum drm_connector_status
-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
 {
-        struct drm_encoder *encoder = &intel_encoder->base;
+        struct drm_encoder *encoder = &crt->base.base;
         struct drm_device *dev = encoder->dev;
         struct drm_i915_private *dev_priv = dev->dev_private;
         struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -434,7 +443,7 @@ static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
         struct drm_device *dev = connector->dev;
-        struct intel_encoder *encoder = intel_attached_encoder(connector);
+        struct intel_crt *crt = intel_attached_crt(connector);
         struct drm_crtc *crtc;
         int dpms_mode;
         enum drm_connector_status status;
@@ -443,28 +452,31 @@ intel_crt_detect(struct drm_connector *connector, bool force)
                 if (intel_crt_detect_hotplug(connector)) {
                         DRM_DEBUG_KMS("CRT detected via hotplug\n");
                         return connector_status_connected;
-                } else
+                } else {
+                        DRM_DEBUG_KMS("CRT not detected via hotplug\n");
                         return connector_status_disconnected;
+                }
         }
 
-        if (intel_crt_detect_ddc(&encoder->base))
+        if (intel_crt_detect_ddc(crt))
                 return connector_status_connected;
 
         if (!force)
                 return connector->status;
 
         /* for pre-945g platforms use load detect */
-        if (encoder->base.crtc && encoder->base.crtc->enabled) {
-                status = intel_crt_load_detect(encoder->base.crtc, encoder);
+        crtc = crt->base.base.crtc;
+        if (crtc && crtc->enabled) {
+                status = intel_crt_load_detect(crtc, crt);
         } else {
-                crtc = intel_get_load_detect_pipe(encoder, connector,
+                crtc = intel_get_load_detect_pipe(&crt->base, connector,
                                                   NULL, &dpms_mode);
                 if (crtc) {
-                        if (intel_crt_detect_ddc(&encoder->base))
+                        if (intel_crt_detect_ddc(crt))
                                 status = connector_status_connected;
                         else
-                                status = intel_crt_load_detect(crtc, encoder);
-                        intel_release_load_detect_pipe(encoder,
+                                status = intel_crt_load_detect(crtc, crt);
+                        intel_release_load_detect_pipe(&crt->base,
                                                        connector, dpms_mode);
                 } else
                         status = connector_status_unknown;
@@ -536,17 +548,17 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
 void intel_crt_init(struct drm_device *dev)
 {
         struct drm_connector *connector;
-        struct intel_encoder *intel_encoder;
+        struct intel_crt *crt;
         struct intel_connector *intel_connector;
         struct drm_i915_private *dev_priv = dev->dev_private;
 
-        intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
-        if (!intel_encoder)
+        crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+        if (!crt)
                 return;
 
         intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
         if (!intel_connector) {
-                kfree(intel_encoder);
+                kfree(crt);
                 return;
         }
 
@@ -554,20 +566,20 @@ void intel_crt_init(struct drm_device *dev)
554 drm_connector_init(dev, &intel_connector->base, 566 drm_connector_init(dev, &intel_connector->base,
555 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA); 567 &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
556 568
557 drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs, 569 drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
558 DRM_MODE_ENCODER_DAC); 570 DRM_MODE_ENCODER_DAC);
559 571
560 intel_connector_attach_encoder(intel_connector, intel_encoder); 572 intel_connector_attach_encoder(intel_connector, &crt->base);
561 573
562 intel_encoder->type = INTEL_OUTPUT_ANALOG; 574 crt->base.type = INTEL_OUTPUT_ANALOG;
563 intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 575 crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
564 (1 << INTEL_ANALOG_CLONE_BIT) | 576 1 << INTEL_ANALOG_CLONE_BIT |
565 (1 << INTEL_SDVO_LVDS_CLONE_BIT); 577 1 << INTEL_SDVO_LVDS_CLONE_BIT);
566 intel_encoder->crtc_mask = (1 << 0) | (1 << 1); 578 crt->base.crtc_mask = (1 << 0) | (1 << 1);
567 connector->interlace_allowed = 1; 579 connector->interlace_allowed = 1;
568 connector->doublescan_allowed = 0; 580 connector->doublescan_allowed = 0;
569 581
570 drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs); 582 drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
571 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs); 583 drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
572 584
573 drm_sysfs_connector_add(connector); 585 drm_sysfs_connector_add(connector);
@@ -577,5 +589,22 @@ void intel_crt_init(struct drm_device *dev)
577 else 589 else
578 connector->polled = DRM_CONNECTOR_POLL_CONNECT; 590 connector->polled = DRM_CONNECTOR_POLL_CONNECT;
579 591
592 /*
 593 * Configure automatic hotplug detection
594 */
595 crt->force_hotplug_required = 0;
596 if (HAS_PCH_SPLIT(dev)) {
597 u32 adpa;
598
599 adpa = I915_READ(PCH_ADPA);
600 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
601 adpa |= ADPA_HOTPLUG_BITS;
602 I915_WRITE(PCH_ADPA, adpa);
603 POSTING_READ(PCH_ADPA);
604
605 DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
606 crt->force_hotplug_required = 1;
607 }
608
580 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS; 609 dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
581} 610}
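
Note: the intel_crt.c changes above replace bare struct intel_encoder usage with a driver-private subclass, struct intel_crt, so per-CRT state such as force_hotplug_required has a home; on PCH platforms the ADPA hotplug trigger bits are programmed once at init and the flag tells later detection to re-force them. A minimal, self-contained sketch of the embed-and-recover pattern, using simplified stand-ins for the kernel types (the kernel itself uses container_of()):

#include <stddef.h>

struct intel_encoder { int type; };

struct intel_crt {
	struct intel_encoder base;	/* embedded base class */
	int force_hotplug_required;
};

/* Recover the containing intel_crt from its embedded encoder, as
 * intel_attached_crt() does in the diff above. */
static struct intel_crt *to_crt(struct intel_encoder *encoder)
{
	return (struct intel_crt *)((char *)encoder -
				    offsetof(struct intel_crt, base));
}
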
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b2..d9b7092439ef 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1611,6 +1611,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
1611 1611
1612 wait_event(dev_priv->pending_flip_queue, 1612 wait_event(dev_priv->pending_flip_queue,
1613 atomic_read(&obj_priv->pending_flip) == 0); 1613 atomic_read(&obj_priv->pending_flip) == 0);
1614
1615 /* Big Hammer, we also need to ensure that any pending
1616 * MI_WAIT_FOR_EVENT inside a user batch buffer on the
1617 * current scanout is retired before unpinning the old
1618 * framebuffer.
1619 */
1620 ret = i915_gem_object_flush_gpu(obj_priv, false);
1621 if (ret) {
1622 i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
1623 mutex_unlock(&dev->struct_mutex);
1624 return ret;
1625 }
1614 } 1626 }
1615 1627
1616 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y, 1628 ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
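
The error path added above is worth noting: by this point the replacement framebuffer has already been pinned, so a failed flush must unpin it again before dropping struct_mutex, while the old scanout buffer stays pinned until the switch actually happens. A compile-able sketch of that unwind ordering, with all names hypothetical:

struct fb { int pinned; };
struct ctx { struct fb *old_fb, *new_fb; };

static int pin(struct fb *fb) { fb->pinned++; return 0; }
static void unpin(struct fb *fb) { fb->pinned--; }
static int flush_gpu(struct ctx *c) { (void)c; return 0; /* stub */ }

static int set_base(struct ctx *c)
{
	int ret = pin(c->new_fb);
	if (ret)
		return ret;

	/* Retire anything (e.g. MI_WAIT_FOR_EVENT in a user batch)
	 * still tied to the current scanout before switching. */
	ret = flush_gpu(c);
	if (ret) {
		unpin(c->new_fb);	/* undo only our own pin */
		return ret;
	}

	unpin(c->old_fb);		/* safe: scanout has moved on */
	c->old_fb = c->new_fb;
	return 0;
}
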
@@ -1681,6 +1693,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
1681 udelay(500); 1693 udelay(500);
1682} 1694}
1683 1695
1696static void intel_fdi_normal_train(struct drm_crtc *crtc)
1697{
1698 struct drm_device *dev = crtc->dev;
1699 struct drm_i915_private *dev_priv = dev->dev_private;
1700 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1701 int pipe = intel_crtc->pipe;
1702 u32 reg, temp;
1703
1704 /* enable normal train */
1705 reg = FDI_TX_CTL(pipe);
1706 temp = I915_READ(reg);
1707 temp &= ~FDI_LINK_TRAIN_NONE;
1708 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
1709 I915_WRITE(reg, temp);
1710
1711 reg = FDI_RX_CTL(pipe);
1712 temp = I915_READ(reg);
1713 if (HAS_PCH_CPT(dev)) {
1714 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1715 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
1716 } else {
1717 temp &= ~FDI_LINK_TRAIN_NONE;
1718 temp |= FDI_LINK_TRAIN_NONE;
1719 }
1720 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
1721
1722 /* wait one idle pattern time */
1723 POSTING_READ(reg);
1724 udelay(1000);
1725}
1726
1684/* The FDI link training functions for ILK/Ibexpeak. */ 1727/* The FDI link training functions for ILK/Ibexpeak. */
1685static void ironlake_fdi_link_train(struct drm_crtc *crtc) 1728static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1686{ 1729{
@@ -1767,27 +1810,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
1767 1810
1768 DRM_DEBUG_KMS("FDI train done\n"); 1811 DRM_DEBUG_KMS("FDI train done\n");
1769 1812
1770 /* enable normal train */
1771 reg = FDI_TX_CTL(pipe);
1772 temp = I915_READ(reg);
1773 temp &= ~FDI_LINK_TRAIN_NONE;
1774 temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
1775 I915_WRITE(reg, temp);
1776
1777 reg = FDI_RX_CTL(pipe);
1778 temp = I915_READ(reg);
1779 if (HAS_PCH_CPT(dev)) {
1780 temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
1781 temp |= FDI_LINK_TRAIN_NORMAL_CPT;
1782 } else {
1783 temp &= ~FDI_LINK_TRAIN_NONE;
1784 temp |= FDI_LINK_TRAIN_NONE;
1785 }
1786 I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
1787
1788 /* wait one idle pattern time */
1789 POSTING_READ(reg);
1790 udelay(1000);
1791} 1813}
1792 1814
1793static const int snb_b_fdi_train_param[] = { 1815
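
The "enable normal train" block removed in this hunk is the code hoisted into intel_fdi_normal_train() above, so that ironlake_crtc_enable() can switch to the normal training pattern at the right point of the enable sequence (see the following hunk). The body is the usual read-modify-write idiom: clear the whole training-pattern field before setting the new value so no stale bits survive. A self-contained sketch with hypothetical bit layouts:

#include <stdint.h>

#define LINK_TRAIN_PATTERN_MASK	(3u << 8)	/* hypothetical field */
#define LINK_TRAIN_NORMAL	(3u << 8)
#define ENHANCE_FRAME_ENABLE	(1u << 18)

/* Clear the field, then set the desired value plus enhanced framing. */
static uint32_t to_normal_train(uint32_t ctl)
{
	ctl &= ~LINK_TRAIN_PATTERN_MASK;
	ctl |= LINK_TRAIN_NORMAL | ENHANCE_FRAME_ENABLE;
	return ctl;
}
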
@@ -2090,15 +2112,19 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
2090 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe))); 2112 I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
2091 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe))); 2113 I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
2092 2114
2115 intel_fdi_normal_train(crtc);
2116
2093 /* For PCH DP, enable TRANS_DP_CTL */ 2117 /* For PCH DP, enable TRANS_DP_CTL */
2094 if (HAS_PCH_CPT(dev) && 2118 if (HAS_PCH_CPT(dev) &&
2095 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2119 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
2096 reg = TRANS_DP_CTL(pipe); 2120 reg = TRANS_DP_CTL(pipe);
2097 temp = I915_READ(reg); 2121 temp = I915_READ(reg);
2098 temp &= ~(TRANS_DP_PORT_SEL_MASK | 2122 temp &= ~(TRANS_DP_PORT_SEL_MASK |
2099 TRANS_DP_SYNC_MASK); 2123 TRANS_DP_SYNC_MASK |
2124 TRANS_DP_BPC_MASK);
2100 temp |= (TRANS_DP_OUTPUT_ENABLE | 2125 temp |= (TRANS_DP_OUTPUT_ENABLE |
2101 TRANS_DP_ENH_FRAMING); 2126 TRANS_DP_ENH_FRAMING);
2127 temp |= TRANS_DP_8BPC;
2102 2128
2103 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC) 2129 if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
2104 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH; 2130 temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2200,9 +2226,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
2200 udelay(100); 2226 udelay(100);
2201 2227
2202 /* Ironlake workaround, disable clock pointer after downing FDI */ 2228 /* Ironlake workaround, disable clock pointer after downing FDI */
2203 I915_WRITE(FDI_RX_CHICKEN(pipe), 2229 if (HAS_PCH_IBX(dev))
2204 I915_READ(FDI_RX_CHICKEN(pipe) & 2230 I915_WRITE(FDI_RX_CHICKEN(pipe),
2205 ~FDI_RX_PHASE_SYNC_POINTER_ENABLE)); 2231 I915_READ(FDI_RX_CHICKEN(pipe) &
2232 ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
2206 2233
2207 /* still set train pattern 1 */ 2234 /* still set train pattern 1 */
2208 reg = FDI_TX_CTL(pipe); 2235 reg = FDI_TX_CTL(pipe);
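
One caveat in this hunk: both the old and new code keep a misplaced closing parenthesis, so the `& ~FDI_RX_PHASE_SYNC_POINTER_ENABLE` is applied to the register *offset* inside I915_READ() rather than to the value read back. The presumably intended read-modify-write, sketched with stand-in register macros (the bit position is hypothetical):

#include <stdint.h>

static uint32_t regs[0x4000];
#define FDI_RX_CHICKEN(pipe)	(0x1000 + (pipe) * 0x800)
#define PHASE_SYNC_POINTER_EN	(1u << 0)	/* hypothetical bit */
#define I915_READ(reg)		(regs[reg])
#define I915_WRITE(reg, val)	(regs[(reg)] = (val))

static void disable_phase_sync_pointer(int pipe)
{
	/* Mask the value read back, not the register offset. */
	I915_WRITE(FDI_RX_CHICKEN(pipe),
		   I915_READ(FDI_RX_CHICKEN(pipe)) &
		   ~PHASE_SYNC_POINTER_EN);
}
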
@@ -2687,27 +2714,19 @@ fdi_reduce_ratio(u32 *num, u32 *den)
2687 } 2714 }
2688} 2715}
2689 2716
2690#define DATA_N 0x800000
2691#define LINK_N 0x80000
2692
2693static void 2717static void
2694ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock, 2718ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
2695 int link_clock, struct fdi_m_n *m_n) 2719 int link_clock, struct fdi_m_n *m_n)
2696{ 2720{
2697 u64 temp;
2698
2699 m_n->tu = 64; /* default size */ 2721 m_n->tu = 64; /* default size */
2700 2722
2701 temp = (u64) DATA_N * pixel_clock; 2723 /* BUG_ON(pixel_clock > INT_MAX / 36); */
2702 temp = div_u64(temp, link_clock); 2724 m_n->gmch_m = bits_per_pixel * pixel_clock;
2703 m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes); 2725 m_n->gmch_n = link_clock * nlanes * 8;
2704 m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
2705 m_n->gmch_n = DATA_N;
2706 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 2726 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
2707 2727
2708 temp = (u64) LINK_N * pixel_clock; 2728 m_n->link_m = pixel_clock;
2709 m_n->link_m = div_u64(temp, link_clock); 2729 m_n->link_n = link_clock;
2710 m_n->link_n = LINK_N;
2711 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n); 2730 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
2712} 2731}
2713 2732
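
The rewritten ironlake_compute_m_n() drops the fixed DATA_N/LINK_N constants and the 64-bit division: it stores the raw data ratio bits_per_pixel * pixel_clock : nlanes * link_clock * 8 and the link ratio pixel_clock : link_clock, then lets fdi_reduce_ratio() shrink both to fit the hardware fields (the commented-out BUG_ON documents the implicit overflow bound on the 32-bit multiply). A runnable sketch, assuming the helper simply halves both terms until they fit the 24-bit M/N fields:

#include <stdio.h>

#define M_N_MASK 0xffffff	/* the 24-bit M/N register fields */

/* Halving numerator and denominator together preserves the ratio
 * to within ~1 lsb. */
static void fdi_reduce_ratio(unsigned int *num, unsigned int *den)
{
	while (*num > M_N_MASK || *den > M_N_MASK) {
		*num >>= 1;
		*den >>= 1;
	}
}

int main(void)
{
	/* 24 bpp over 4 FDI lanes, 148500 kHz pixel clock against a
	 * 270000 kHz link clock: gmch ratio = 24*148500 : 4*270000*8. */
	unsigned int m = 24 * 148500, n = 4 * 270000 * 8;

	fdi_reduce_ratio(&m, &n);
	printf("gmch m/n = %u/%u\n", m, n);	/* 3564000/8640000 */
	return 0;
}
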
@@ -3691,6 +3710,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3691 3710
3692 /* FDI link */ 3711 /* FDI link */
3693 if (HAS_PCH_SPLIT(dev)) { 3712 if (HAS_PCH_SPLIT(dev)) {
3713 int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
3694 int lane = 0, link_bw, bpp; 3714 int lane = 0, link_bw, bpp;
3695 /* CPU eDP doesn't require FDI link, so just set DP M/N 3715 /* CPU eDP doesn't require FDI link, so just set DP M/N
3696 according to current link config */ 3716 according to current link config */
@@ -3774,6 +3794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
3774 3794
3775 intel_crtc->fdi_lanes = lane; 3795 intel_crtc->fdi_lanes = lane;
3776 3796
3797 if (pixel_multiplier > 1)
3798 link_bw *= pixel_multiplier;
3777 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n); 3799 ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
3778 } 3800 }
3779 3801
@@ -5211,6 +5233,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
5211 .page_flip = intel_crtc_page_flip, 5233 .page_flip = intel_crtc_page_flip,
5212}; 5234};
5213 5235
5236static void intel_sanitize_modesetting(struct drm_device *dev,
5237 int pipe, int plane)
5238{
5239 struct drm_i915_private *dev_priv = dev->dev_private;
5240 u32 reg, val;
5241
5242 if (HAS_PCH_SPLIT(dev))
5243 return;
5244
5245 /* Who knows what state these registers were left in by the BIOS or
5246 * grub?
5247 *
5248 * If we leave the registers in a conflicting state (e.g. with the
5249 * display plane reading from a pipe other than the one we intend
5250 * to use) then when we attempt to teardown the active mode, we will
5251 * not disable the pipes and planes in the correct order -- leaving
5252 * a plane reading from a disabled pipe and possibly leading to
5253 * undefined behaviour.
5254 */
5255
5256 reg = DSPCNTR(plane);
5257 val = I915_READ(reg);
5258
5259 if ((val & DISPLAY_PLANE_ENABLE) == 0)
5260 return;
5261 if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
5262 return;
5263
5264 /* This display plane is active and attached to the other CPU pipe. */
5265 pipe = !pipe;
5266
5267 /* Disable the plane and wait for it to stop reading from the pipe. */
5268 I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
5269 intel_flush_display_plane(dev, plane);
5270
5271 if (IS_GEN2(dev))
5272 intel_wait_for_vblank(dev, pipe);
5273
5274 if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
5275 return;
5276
5277 /* Switch off the pipe. */
5278 reg = PIPECONF(pipe);
5279 val = I915_READ(reg);
5280 if (val & PIPECONF_ENABLE) {
5281 I915_WRITE(reg, val & ~PIPECONF_ENABLE);
5282 intel_wait_for_pipe_off(dev, pipe);
5283 }
5284}
5214 5285
5215static void intel_crtc_init(struct drm_device *dev, int pipe) 5286static void intel_crtc_init(struct drm_device *dev, int pipe)
5216{ 5287{
@@ -5262,6 +5333,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
5262 5333
5263 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer, 5334 setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
5264 (unsigned long)intel_crtc); 5335 (unsigned long)intel_crtc);
5336
5337 intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
5265} 5338}
5266 5339
5267int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data, 5340int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
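
intel_sanitize_modesetting(), now called from intel_crtc_init() above, codifies the teardown invariant the comment describes: a display plane must be disabled and flushed before the pipe feeding it goes down, so a plane the firmware left attached to the "other" pipe is shut off up front. The ordering, sketched with stand-in registers and stubbed waits:

#include <stdint.h>

static uint32_t regs[0x400];
#define DSPCNTR(plane)	(0x100 + (plane))
#define PIPECONF(pipe)	(0x200 + (pipe))
#define PLANE_ENABLE	(1u << 31)
#define PIPE_ENABLE	(1u << 31)

static void flush_plane(int plane) { (void)plane; /* stub */ }
static void wait_pipe_off(int pipe) { (void)pipe; /* stub */ }

static void disable_plane_then_pipe(int plane, int pipe)
{
	/* Plane first: it must stop fetching before the pipe dies. */
	regs[DSPCNTR(plane)] &= ~PLANE_ENABLE;
	flush_plane(plane);

	regs[PIPECONF(pipe)] &= ~PIPE_ENABLE;
	wait_pipe_off(pipe);
}
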
@@ -5311,9 +5384,14 @@ static void intel_setup_outputs(struct drm_device *dev)
5311 struct drm_i915_private *dev_priv = dev->dev_private; 5384 struct drm_i915_private *dev_priv = dev->dev_private;
5312 struct intel_encoder *encoder; 5385 struct intel_encoder *encoder;
5313 bool dpd_is_edp = false; 5386 bool dpd_is_edp = false;
5387 bool has_lvds = false;
5314 5388
5315 if (IS_MOBILE(dev) && !IS_I830(dev)) 5389 if (IS_MOBILE(dev) && !IS_I830(dev))
5316 intel_lvds_init(dev); 5390 has_lvds = intel_lvds_init(dev);
5391 if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
5392 /* disable the panel fitter on everything but LVDS */
5393 I915_WRITE(PFIT_CONTROL, 0);
5394 }
5317 5395
5318 if (HAS_PCH_SPLIT(dev)) { 5396 if (HAS_PCH_SPLIT(dev)) {
5319 dpd_is_edp = intel_dpd_is_edp(dev); 5397 dpd_is_edp = intel_dpd_is_edp(dev);
@@ -5581,20 +5659,19 @@ void ironlake_enable_drps(struct drm_device *dev)
5581 fmin = (rgvmodectl & MEMMODE_FMIN_MASK); 5659 fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
5582 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >> 5660 fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
5583 MEMMODE_FSTART_SHIFT; 5661 MEMMODE_FSTART_SHIFT;
5584 fstart = fmax;
5585 5662
5586 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >> 5663 vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
5587 PXVFREQ_PX_SHIFT; 5664 PXVFREQ_PX_SHIFT;
5588 5665
5589 dev_priv->fmax = fstart; /* IPS callback will increase this */ 5666 dev_priv->fmax = fmax; /* IPS callback will increase this */
5590 dev_priv->fstart = fstart; 5667 dev_priv->fstart = fstart;
5591 5668
5592 dev_priv->max_delay = fmax; 5669 dev_priv->max_delay = fstart;
5593 dev_priv->min_delay = fmin; 5670 dev_priv->min_delay = fmin;
5594 dev_priv->cur_delay = fstart; 5671 dev_priv->cur_delay = fstart;
5595 5672
5596 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin, 5673 DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
5597 fstart); 5674 fmax, fmin, fstart);
5598 5675
5599 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN); 5676 I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
5600 5677
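
The DRPS fix above untangles values the old code conflated by overwriting fstart with fmax (names per the diff):

/* Before: fstart = fmax clobbered the firmware start point, so
 *   dev_priv->fmax      ended up as fstart (== fmax by accident) and
 *   dev_priv->max_delay ended up as fmax.
 * After, each field keeps its own meaning:
 *   dev_priv->fmax      = fmax;    absolute cap, IPS may raise to it
 *   dev_priv->max_delay = fstart;  working ceiling = firmware start
 *   dev_priv->min_delay = fmin;    working floor
 *   dev_priv->cur_delay = fstart;  begin where the firmware left us
 */
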
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1d63b1..df648cb4c296 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -584,17 +584,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
584 mode->clock = dev_priv->panel_fixed_mode->clock; 584 mode->clock = dev_priv->panel_fixed_mode->clock;
585 } 585 }
586 586
587 /* Just use VBT values for eDP */
588 if (is_edp(intel_dp)) {
589 intel_dp->lane_count = dev_priv->edp.lanes;
590 intel_dp->link_bw = dev_priv->edp.rate;
591 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
592 DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
593 intel_dp->link_bw, intel_dp->lane_count,
594 adjusted_mode->clock);
595 return true;
596 }
597
598 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 587 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
599 for (clock = 0; clock <= max_clock; clock++) { 588 for (clock = 0; clock <= max_clock; clock++) {
600 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 589 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -613,6 +602,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
613 } 602 }
614 } 603 }
615 604
605 if (is_edp(intel_dp)) {
 606 /* okay, we failed; just pick the highest */
607 intel_dp->lane_count = max_lane_count;
608 intel_dp->link_bw = bws[max_clock];
609 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
610 DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
611 "count %d clock %d\n",
612 intel_dp->link_bw, intel_dp->lane_count,
613 adjusted_mode->clock);
614
615 return true;
616 }
617
616 return false; 618 return false;
617} 619}
618 620
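
With this reordering, eDP no longer bypasses the normal bandwidth search with VBT-fixed values: the standard smallest-fit scan runs first, and the eDP branch only fires as a forced best-effort fallback when no (lane count, clock) pair satisfied the mode. The resulting search order, sketched with hypothetical rate helpers:

#include <stdbool.h>
#include <stdint.h>

static int link_clock(uint8_t bw) { return bw == 0x0a ? 270000 : 162000; }
static int data_rate(int clock, int lanes) { return clock * lanes * 8 / 10; }

static bool pick_link_config(int mode_rate, bool is_edp,
			     int *out_lanes, uint8_t *out_bw)
{
	static const uint8_t bws[] = { 0x06, 0x0a };	/* 1.62, 2.7 GHz */
	int lanes;
	unsigned int clock;

	for (lanes = 1; lanes <= 4; lanes <<= 1)
		for (clock = 0; clock < 2; clock++)
			if (data_rate(link_clock(bws[clock]), lanes) >= mode_rate) {
				*out_lanes = lanes;	/* smallest fit wins */
				*out_bw = bws[clock];
				return true;
			}

	if (is_edp) {		/* forced fallback: pick the maximum */
		*out_lanes = 4;
		*out_bw = bws[1];
		return true;
	}
	return false;
}
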
@@ -1087,21 +1089,11 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
1087} 1089}
1088 1090
1089static uint32_t 1091static uint32_t
1090intel_dp_signal_levels(struct intel_dp *intel_dp) 1092intel_dp_signal_levels(uint8_t train_set, int lane_count)
1091{ 1093{
1092 struct drm_device *dev = intel_dp->base.base.dev; 1094 uint32_t signal_levels = 0;
1093 struct drm_i915_private *dev_priv = dev->dev_private;
1094 uint32_t signal_levels = 0;
1095 u8 train_set = intel_dp->train_set[0];
1096 u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
1097 u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
1098 1095
1099 if (is_edp(intel_dp)) { 1096 switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
1100 vswing = dev_priv->edp.vswing;
1101 preemphasis = dev_priv->edp.preemphasis;
1102 }
1103
1104 switch (vswing) {
1105 case DP_TRAIN_VOLTAGE_SWING_400: 1097 case DP_TRAIN_VOLTAGE_SWING_400:
1106 default: 1098 default:
1107 signal_levels |= DP_VOLTAGE_0_4; 1099 signal_levels |= DP_VOLTAGE_0_4;
@@ -1116,7 +1108,7 @@ intel_dp_signal_levels(struct intel_dp *intel_dp)
1116 signal_levels |= DP_VOLTAGE_1_2; 1108 signal_levels |= DP_VOLTAGE_1_2;
1117 break; 1109 break;
1118 } 1110 }
1119 switch (preemphasis) { 1111 switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
1120 case DP_TRAIN_PRE_EMPHASIS_0: 1112 case DP_TRAIN_PRE_EMPHASIS_0:
1121 default: 1113 default:
1122 signal_levels |= DP_PRE_EMPHASIS_0; 1114 signal_levels |= DP_PRE_EMPHASIS_0;
@@ -1203,18 +1195,6 @@ intel_channel_eq_ok(struct intel_dp *intel_dp)
1203} 1195}
1204 1196
1205static bool 1197static bool
1206intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
1207{
1208 struct drm_device *dev = intel_dp->base.base.dev;
1209 struct drm_i915_private *dev_priv = dev->dev_private;
1210
1211 if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
1212 return false;
1213
1214 return true;
1215}
1216
1217static bool
1218intel_dp_set_link_train(struct intel_dp *intel_dp, 1198intel_dp_set_link_train(struct intel_dp *intel_dp,
1219 uint32_t dp_reg_value, 1199 uint32_t dp_reg_value,
1220 uint8_t dp_train_pat) 1200 uint8_t dp_train_pat)
@@ -1226,9 +1206,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1226 I915_WRITE(intel_dp->output_reg, dp_reg_value); 1206 I915_WRITE(intel_dp->output_reg, dp_reg_value);
1227 POSTING_READ(intel_dp->output_reg); 1207 POSTING_READ(intel_dp->output_reg);
1228 1208
1229 if (!intel_dp_aux_handshake_required(intel_dp))
1230 return true;
1231
1232 intel_dp_aux_native_write_1(intel_dp, 1209 intel_dp_aux_native_write_1(intel_dp,
1233 DP_TRAINING_PATTERN_SET, 1210 DP_TRAINING_PATTERN_SET,
1234 dp_train_pat); 1211 dp_train_pat);
@@ -1261,11 +1238,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1261 POSTING_READ(intel_dp->output_reg); 1238 POSTING_READ(intel_dp->output_reg);
1262 intel_wait_for_vblank(dev, intel_crtc->pipe); 1239 intel_wait_for_vblank(dev, intel_crtc->pipe);
1263 1240
1264 if (intel_dp_aux_handshake_required(intel_dp)) 1241 /* Write the link configuration data */
1265 /* Write the link configuration data */ 1242 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
1266 intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET, 1243 intel_dp->link_configuration,
1267 intel_dp->link_configuration, 1244 DP_LINK_CONFIGURATION_SIZE);
1268 DP_LINK_CONFIGURATION_SIZE);
1269 1245
1270 DP |= DP_PORT_EN; 1246 DP |= DP_PORT_EN;
1271 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) 1247 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
@@ -1283,7 +1259,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1283 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1259 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1284 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1260 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1285 } else { 1261 } else {
1286 signal_levels = intel_dp_signal_levels(intel_dp); 1262 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
1287 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1263 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1288 } 1264 }
1289 1265
@@ -1297,37 +1273,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1297 break; 1273 break;
1298 /* Set training pattern 1 */ 1274 /* Set training pattern 1 */
1299 1275
1300 udelay(500); 1276 udelay(100);
1301 if (intel_dp_aux_handshake_required(intel_dp)) { 1277 if (!intel_dp_get_link_status(intel_dp))
1302 break; 1278 break;
1303 } else {
1304 if (!intel_dp_get_link_status(intel_dp))
1305 break;
1306 1279
1307 if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1280 if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
1308 clock_recovery = true; 1281 clock_recovery = true;
1309 break; 1282 break;
1310 } 1283 }
1311 1284
1312 /* Check to see if we've tried the max voltage */ 1285 /* Check to see if we've tried the max voltage */
1313 for (i = 0; i < intel_dp->lane_count; i++) 1286 for (i = 0; i < intel_dp->lane_count; i++)
1314 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1287 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1315 break;
1316 if (i == intel_dp->lane_count)
1317 break; 1288 break;
1289 if (i == intel_dp->lane_count)
1290 break;
1318 1291
1319 /* Check to see if we've tried the same voltage 5 times */ 1292 /* Check to see if we've tried the same voltage 5 times */
1320 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1293 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1321 ++tries; 1294 ++tries;
1322 if (tries == 5) 1295 if (tries == 5)
1323 break; 1296 break;
1324 } else 1297 } else
1325 tries = 0; 1298 tries = 0;
1326 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1299 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1327 1300
1328 /* Compute new intel_dp->train_set as requested by target */ 1301 /* Compute new intel_dp->train_set as requested by target */
1329 intel_get_adjust_train(intel_dp); 1302 intel_get_adjust_train(intel_dp);
1330 }
1331 } 1303 }
1332 1304
1333 intel_dp->DP = DP; 1305 intel_dp->DP = DP;
@@ -1354,7 +1326,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1354 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1326 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1355 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1327 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1356 } else { 1328 } else {
1357 signal_levels = intel_dp_signal_levels(intel_dp); 1329 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
1358 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1330 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1359 } 1331 }
1360 1332
@@ -1368,28 +1340,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1368 DP_TRAINING_PATTERN_2)) 1340 DP_TRAINING_PATTERN_2))
1369 break; 1341 break;
1370 1342
1371 udelay(500); 1343 udelay(400);
1372 1344 if (!intel_dp_get_link_status(intel_dp))
1373 if (!intel_dp_aux_handshake_required(intel_dp)) {
1374 break; 1345 break;
1375 } else {
1376 if (!intel_dp_get_link_status(intel_dp))
1377 break;
1378 1346
1379 if (intel_channel_eq_ok(intel_dp)) { 1347 if (intel_channel_eq_ok(intel_dp)) {
1380 channel_eq = true; 1348 channel_eq = true;
1381 break; 1349 break;
1382 } 1350 }
1383 1351
1384 /* Try 5 times */ 1352 /* Try 5 times */
1385 if (tries > 5) 1353 if (tries > 5)
1386 break; 1354 break;
1387 1355
1388 /* Compute new intel_dp->train_set as requested by target */ 1356 /* Compute new intel_dp->train_set as requested by target */
1389 intel_get_adjust_train(intel_dp); 1357 intel_get_adjust_train(intel_dp);
1390 ++tries; 1358 ++tries;
1391 }
1392 } 1359 }
1360
1393 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp)) 1361 if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
1394 reg = DP | DP_LINK_TRAIN_OFF_CPT; 1362 reg = DP | DP_LINK_TRAIN_OFF_CPT;
1395 else 1363 else
@@ -1408,6 +1376,9 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1408 struct drm_i915_private *dev_priv = dev->dev_private; 1376 struct drm_i915_private *dev_priv = dev->dev_private;
1409 uint32_t DP = intel_dp->DP; 1377 uint32_t DP = intel_dp->DP;
1410 1378
1379 if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
1380 return;
1381
1411 DRM_DEBUG_KMS("\n"); 1382 DRM_DEBUG_KMS("\n");
1412 1383
1413 if (is_edp(intel_dp)) { 1384 if (is_edp(intel_dp)) {
@@ -1430,6 +1401,28 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1430 1401
1431 if (is_edp(intel_dp)) 1402 if (is_edp(intel_dp))
1432 DP |= DP_LINK_TRAIN_OFF; 1403 DP |= DP_LINK_TRAIN_OFF;
1404
1405 if (!HAS_PCH_CPT(dev) &&
1406 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
1407 struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
1408 /* Hardware workaround: leaving our transcoder select
1409 * set to transcoder B while it's off will prevent the
1410 * corresponding HDMI output on transcoder A.
1411 *
1412 * Combine this with another hardware workaround:
1413 * transcoder select bit can only be cleared while the
1414 * port is enabled.
1415 */
1416 DP &= ~DP_PIPEB_SELECT;
1417 I915_WRITE(intel_dp->output_reg, DP);
1418
1419 /* Changes to enable or select take place the vblank
1420 * after being written.
1421 */
1422 intel_wait_for_vblank(intel_dp->base.base.dev,
1423 intel_crtc->pipe);
1424 }
1425
1433 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN); 1426 I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
1434 POSTING_READ(intel_dp->output_reg); 1427 POSTING_READ(intel_dp->output_reg);
1435} 1428}
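
The new block in intel_dp_link_down() stacks two hardware workarounds whose interaction dictates the ordering: the transcoder-B select bit must be cleared or the paired HDMI output on transcoder A stays dead, yet the bit can only be cleared while the port is still enabled, and the change only latches at the next vblank. Hence: clear select, wait a vblank, then drop DP_PORT_EN. Sketched with stand-ins:

#include <stdint.h>

#define DP_PORT_EN	(1u << 31)
#define DP_PIPEB_SELECT	(1u << 30)

static uint32_t port_reg;
static void wait_for_vblank(void) { /* stub */ }

static void dp_link_down(void)
{
	uint32_t v = port_reg;

	if (v & DP_PIPEB_SELECT) {
		/* Select is only writable while the port is enabled,
		 * and takes effect at the following vblank. */
		v &= ~DP_PIPEB_SELECT;
		port_reg = v;
		wait_for_vblank();
	}
	port_reg = v & ~DP_PORT_EN;
}
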
@@ -1517,7 +1510,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
1517 status = connector_status_connected; 1510 status = connector_status_connected;
1518 } 1511 }
1519 1512
1520 return bit; 1513 return status;
1521} 1514}
1522 1515
1523/** 1516/**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86a8765..e52c6125bb1f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -237,7 +237,7 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
237extern void intel_dvo_init(struct drm_device *dev); 237extern void intel_dvo_init(struct drm_device *dev);
238extern void intel_tv_init(struct drm_device *dev); 238extern void intel_tv_init(struct drm_device *dev);
239extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj); 239extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
240extern void intel_lvds_init(struct drm_device *dev); 240extern bool intel_lvds_init(struct drm_device *dev);
241extern void intel_dp_init(struct drm_device *dev, int dp_reg); 241extern void intel_dp_init(struct drm_device *dev, int dp_reg);
242void 242void
243intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 243intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
296extern void intel_init_clock_gating(struct drm_device *dev); 296extern void intel_init_clock_gating(struct drm_device *dev);
297extern void ironlake_enable_drps(struct drm_device *dev); 297extern void ironlake_enable_drps(struct drm_device *dev);
298extern void ironlake_disable_drps(struct drm_device *dev); 298extern void ironlake_disable_drps(struct drm_device *dev);
299extern void intel_init_emon(struct drm_device *dev);
299 300
300extern int intel_pin_and_fence_fb_obj(struct drm_device *dev, 301extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
301 struct drm_gem_object *obj, 302 struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 2be4f728ed0c..3dba086e7eea 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -160,7 +160,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
160 }; 160 };
161 struct intel_gpio *gpio; 161 struct intel_gpio *gpio;
162 162
163 if (pin < 1 || pin > 7) 163 if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
164 return NULL; 164 return NULL;
165 165
166 gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL); 166 gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
@@ -172,7 +172,8 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
172 gpio->reg += PCH_GPIOA - GPIOA; 172 gpio->reg += PCH_GPIOA - GPIOA;
173 gpio->dev_priv = dev_priv; 173 gpio->dev_priv = dev_priv;
174 174
175 snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]); 175 snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
176 "i915 GPIO%c", "?BACDE?F"[pin]);
176 gpio->adapter.owner = THIS_MODULE; 177 gpio->adapter.owner = THIS_MODULE;
177 gpio->adapter.algo_data = &gpio->algo; 178 gpio->adapter.algo_data = &gpio->algo;
178 gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; 179 gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
@@ -349,7 +350,7 @@ int intel_setup_gmbus(struct drm_device *dev)
349 "panel", 350 "panel",
350 "dpc", 351 "dpc",
351 "dpb", 352 "dpb",
352 "reserved" 353 "reserved",
353 "dpd", 354 "dpd",
354 }; 355 };
355 struct drm_i915_private *dev_priv = dev->dev_private; 356 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -366,8 +367,8 @@ int intel_setup_gmbus(struct drm_device *dev)
366 bus->adapter.owner = THIS_MODULE; 367 bus->adapter.owner = THIS_MODULE;
367 bus->adapter.class = I2C_CLASS_DDC; 368 bus->adapter.class = I2C_CLASS_DDC;
368 snprintf(bus->adapter.name, 369 snprintf(bus->adapter.name,
369 I2C_NAME_SIZE, 370 sizeof(bus->adapter.name),
370 "gmbus %s", 371 "i915 gmbus %s",
371 names[i]); 372 names[i]);
372 373
373 bus->adapter.dev.parent = &dev->pdev->dev; 374 bus->adapter.dev.parent = &dev->pdev->dev;
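
The one-character fix in the gmbus name table is the classic adjacent-string-literal pitfall: without the comma, "reserved" "dpd" concatenates into a single "reserveddpd" element, shortening the array by one and shifting the dpd name off the end. The other hunks harden the pin lookup (table-driven bounds check instead of the magic 1..7 range) and size the snprintf by sizeof() while fixing the GPIO letter map. The comma bug, demonstrated standalone:

#include <stdio.h>

int main(void)
{
	/* Missing comma: two literals merge into one array element. */
	const char *bad[]  = { "dpc", "dpb", "reserved" "dpd" };
	const char *good[] = { "dpc", "dpb", "reserved", "dpd" };

	printf("%zu vs %zu entries\n",
	       sizeof(bad) / sizeof(bad[0]),		/* 3 */
	       sizeof(good) / sizeof(good[0]));		/* 4 */
	printf("bad[2] = \"%s\"\n", bad[2]);		/* "reserveddpd" */
	return 0;
}
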
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a649990ea9..25bcedf386fd 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -68,7 +68,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
68/** 68/**
69 * Sets the power state for the panel. 69 * Sets the power state for the panel.
70 */ 70 */
71static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on) 71static void intel_lvds_enable(struct intel_lvds *intel_lvds)
72{ 72{
73 struct drm_device *dev = intel_lvds->base.base.dev; 73 struct drm_device *dev = intel_lvds->base.base.dev;
74 struct drm_i915_private *dev_priv = dev->dev_private; 74 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -82,26 +82,61 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
82 lvds_reg = LVDS; 82 lvds_reg = LVDS;
83 } 83 }
84 84
85 if (on) { 85 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
86 I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
87 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
88 intel_panel_set_backlight(dev, dev_priv->backlight_level);
89 } else {
90 dev_priv->backlight_level = intel_panel_get_backlight(dev);
91
92 intel_panel_set_backlight(dev, 0);
93 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
94 86
95 if (intel_lvds->pfit_control) { 87 if (intel_lvds->pfit_dirty) {
96 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) 88 /*
97 DRM_ERROR("timed out waiting for panel to power off\n"); 89 * Enable automatic panel scaling so that non-native modes
98 I915_WRITE(PFIT_CONTROL, 0); 90 * fill the screen. The panel fitter should only be
99 intel_lvds->pfit_control = 0; 91 * adjusted whilst the pipe is disabled, according to
92 * register description and PRM.
93 */
94 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
95 intel_lvds->pfit_control,
96 intel_lvds->pfit_pgm_ratios);
97 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) {
98 DRM_ERROR("timed out waiting for panel to power off\n");
99 } else {
100 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
101 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
100 intel_lvds->pfit_dirty = false; 102 intel_lvds->pfit_dirty = false;
101 } 103 }
104 }
105
106 I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
107 POSTING_READ(lvds_reg);
108
109 intel_panel_set_backlight(dev, dev_priv->backlight_level);
110}
111
112static void intel_lvds_disable(struct intel_lvds *intel_lvds)
113{
114 struct drm_device *dev = intel_lvds->base.base.dev;
115 struct drm_i915_private *dev_priv = dev->dev_private;
116 u32 ctl_reg, lvds_reg;
117
118 if (HAS_PCH_SPLIT(dev)) {
119 ctl_reg = PCH_PP_CONTROL;
120 lvds_reg = PCH_LVDS;
121 } else {
122 ctl_reg = PP_CONTROL;
123 lvds_reg = LVDS;
124 }
125
126 dev_priv->backlight_level = intel_panel_get_backlight(dev);
127 intel_panel_set_backlight(dev, 0);
102 128
103 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN); 129 I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
130
131 if (intel_lvds->pfit_control) {
132 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
133 DRM_ERROR("timed out waiting for panel to power off\n");
134
135 I915_WRITE(PFIT_CONTROL, 0);
136 intel_lvds->pfit_dirty = true;
104 } 137 }
138
139 I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
105 POSTING_READ(lvds_reg); 140 POSTING_READ(lvds_reg);
106} 141}
107 142
@@ -110,9 +145,9 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
110 struct intel_lvds *intel_lvds = to_intel_lvds(encoder); 145 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
111 146
112 if (mode == DRM_MODE_DPMS_ON) 147 if (mode == DRM_MODE_DPMS_ON)
113 intel_lvds_set_power(intel_lvds, true); 148 intel_lvds_enable(intel_lvds);
114 else 149 else
115 intel_lvds_set_power(intel_lvds, false); 150 intel_lvds_disable(intel_lvds);
116 151
117 /* XXX: We never power down the LVDS pairs. */ 152 /* XXX: We never power down the LVDS pairs. */
118} 153}
@@ -411,43 +446,18 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
411 /* Always do a full power on as we do not know what state 446 /* Always do a full power on as we do not know what state
412 * we were left in. 447 * we were left in.
413 */ 448 */
414 intel_lvds_set_power(intel_lvds, true); 449 intel_lvds_enable(intel_lvds);
415} 450}
416 451
417static void intel_lvds_mode_set(struct drm_encoder *encoder, 452static void intel_lvds_mode_set(struct drm_encoder *encoder,
418 struct drm_display_mode *mode, 453 struct drm_display_mode *mode,
419 struct drm_display_mode *adjusted_mode) 454 struct drm_display_mode *adjusted_mode)
420{ 455{
421 struct drm_device *dev = encoder->dev;
422 struct drm_i915_private *dev_priv = dev->dev_private;
423 struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
424
425 /* 456 /*
426 * The LVDS pin pair will already have been turned on in the 457 * The LVDS pin pair will already have been turned on in the
427 * intel_crtc_mode_set since it has a large impact on the DPLL 458 * intel_crtc_mode_set since it has a large impact on the DPLL
428 * settings. 459 * settings.
429 */ 460 */
430
431 if (HAS_PCH_SPLIT(dev))
432 return;
433
434 if (!intel_lvds->pfit_dirty)
435 return;
436
437 /*
438 * Enable automatic panel scaling so that non-native modes fill the
439 * screen. Should be enabled before the pipe is enabled, according to
440 * register description and PRM.
441 */
442 DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
443 intel_lvds->pfit_control,
444 intel_lvds->pfit_pgm_ratios);
445 if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
446 DRM_ERROR("timed out waiting for panel to power off\n");
447
448 I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
449 I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
450 intel_lvds->pfit_dirty = false;
451} 461}
452 462
453/** 463/**
@@ -481,11 +491,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
481 struct drm_device *dev = connector->dev; 491 struct drm_device *dev = connector->dev;
482 struct drm_display_mode *mode; 492 struct drm_display_mode *mode;
483 493
484 if (intel_lvds->edid) { 494 if (intel_lvds->edid)
485 drm_mode_connector_update_edid_property(connector,
486 intel_lvds->edid);
487 return drm_add_edid_modes(connector, intel_lvds->edid); 495 return drm_add_edid_modes(connector, intel_lvds->edid);
488 }
489 496
490 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode); 497 mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
491 if (mode == 0) 498 if (mode == 0)
@@ -840,7 +847,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
840 * Create the connector, register the LVDS DDC bus, and try to figure out what 847 * Create the connector, register the LVDS DDC bus, and try to figure out what
841 * modes we can display on the LVDS panel (if present). 848 * modes we can display on the LVDS panel (if present).
842 */ 849 */
843void intel_lvds_init(struct drm_device *dev) 850bool intel_lvds_init(struct drm_device *dev)
844{ 851{
845 struct drm_i915_private *dev_priv = dev->dev_private; 852 struct drm_i915_private *dev_priv = dev->dev_private;
846 struct intel_lvds *intel_lvds; 853 struct intel_lvds *intel_lvds;
@@ -856,37 +863,37 @@ void intel_lvds_init(struct drm_device *dev)
856 863
857 /* Skip init on machines we know falsely report LVDS */ 864 /* Skip init on machines we know falsely report LVDS */
858 if (dmi_check_system(intel_no_lvds)) 865 if (dmi_check_system(intel_no_lvds))
859 return; 866 return false;
860 867
861 pin = GMBUS_PORT_PANEL; 868 pin = GMBUS_PORT_PANEL;
862 if (!lvds_is_present_in_vbt(dev, &pin)) { 869 if (!lvds_is_present_in_vbt(dev, &pin)) {
863 DRM_DEBUG_KMS("LVDS is not present in VBT\n"); 870 DRM_DEBUG_KMS("LVDS is not present in VBT\n");
864 return; 871 return false;
865 } 872 }
866 873
867 if (HAS_PCH_SPLIT(dev)) { 874 if (HAS_PCH_SPLIT(dev)) {
868 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0) 875 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
869 return; 876 return false;
870 if (dev_priv->edp.support) { 877 if (dev_priv->edp.support) {
871 DRM_DEBUG_KMS("disable LVDS for eDP support\n"); 878 DRM_DEBUG_KMS("disable LVDS for eDP support\n");
872 return; 879 return false;
873 } 880 }
874 } 881 }
875 882
876 if (!intel_lvds_ddc_probe(dev, pin)) { 883 if (!intel_lvds_ddc_probe(dev, pin)) {
877 DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n"); 884 DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
878 return; 885 return false;
879 } 886 }
880 887
881 intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL); 888 intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
882 if (!intel_lvds) { 889 if (!intel_lvds) {
883 return; 890 return false;
884 } 891 }
885 892
886 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 893 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
887 if (!intel_connector) { 894 if (!intel_connector) {
888 kfree(intel_lvds); 895 kfree(intel_lvds);
889 return; 896 return false;
890 } 897 }
891 898
892 if (!HAS_PCH_SPLIT(dev)) { 899 if (!HAS_PCH_SPLIT(dev)) {
@@ -939,7 +946,16 @@ void intel_lvds_init(struct drm_device *dev)
939 */ 946 */
940 intel_lvds->edid = drm_get_edid(connector, 947 intel_lvds->edid = drm_get_edid(connector,
941 &dev_priv->gmbus[pin].adapter); 948 &dev_priv->gmbus[pin].adapter);
942 949 if (intel_lvds->edid) {
950 if (drm_add_edid_modes(connector,
951 intel_lvds->edid)) {
952 drm_mode_connector_update_edid_property(connector,
953 intel_lvds->edid);
954 } else {
955 kfree(intel_lvds->edid);
956 intel_lvds->edid = NULL;
957 }
958 }
943 if (!intel_lvds->edid) { 959 if (!intel_lvds->edid) {
944 /* Didn't get an EDID, so 960 /* Didn't get an EDID, so
945 * Set wide sync ranges so we get all modes 961 * Set wide sync ranges so we get all modes
@@ -1020,7 +1036,7 @@ out:
1020 /* keep the LVDS connector */ 1036 /* keep the LVDS connector */
1021 dev_priv->int_lvds_connector = connector; 1037 dev_priv->int_lvds_connector = connector;
1022 drm_sysfs_connector_add(connector); 1038 drm_sysfs_connector_add(connector);
1023 return; 1039 return true;
1024 1040
1025failed: 1041failed:
1026 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n"); 1042 DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
@@ -1028,4 +1044,5 @@ failed:
1028 drm_encoder_cleanup(encoder); 1044 drm_encoder_cleanup(encoder);
1029 kfree(intel_lvds); 1045 kfree(intel_lvds);
1030 kfree(intel_connector); 1046 kfree(intel_connector);
1047 return false;
1031} 1048}
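
Besides returning bool so intel_setup_outputs() can disable the panel fitter when no LVDS panel is found, the LVDS init path now validates the cached EDID: it is kept only if it actually produced modes, otherwise it is freed so later probes fall back to the fixed panel mode cleanly. That pattern, sketched with hypothetical helpers:

#include <stdlib.h>

struct edid { int dummy; };
struct connector { struct edid *edid; };

static struct edid *fetch_edid(void) { return malloc(sizeof(struct edid)); }
static int add_edid_modes(struct edid *e) { (void)e; return 0; /* no modes */ }

static void cache_valid_edid(struct connector *c)
{
	c->edid = fetch_edid();
	if (c->edid && !add_edid_modes(c->edid)) {
		/* EDID present but useless: drop it so the fixed
		 * panel-mode path takes over. */
		free(c->edid);
		c->edid = NULL;
	}
}
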
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc3cd6b..9b0d9a867aea 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
512 return 0; 512 return 0;
513 513
514err_out: 514err_out:
515 iounmap(opregion->header); 515 iounmap(base);
516 return err; 516 return err;
517} 517}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219a..02ff0a481f47 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
946{ 946{
947 int uv_hscale = uv_hsubsampling(rec->flags); 947 int uv_hscale = uv_hsubsampling(rec->flags);
948 int uv_vscale = uv_vsubsampling(rec->flags); 948 int uv_vscale = uv_vsubsampling(rec->flags);
949 u32 stride_mask, depth, tmp; 949 u32 stride_mask;
950 int depth;
951 u32 tmp;
950 952
951 /* check src dimensions */ 953 /* check src dimensions */
952 if (IS_845G(dev) || IS_I830(dev)) { 954 if (IS_845G(dev) || IS_I830(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae2..89a65be8a3f3 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -156,28 +156,30 @@ static int init_ring_common(struct drm_device *dev,
156 156
157 /* G45 ring initialization fails to reset head to zero */ 157 /* G45 ring initialization fails to reset head to zero */
158 if (head != 0) { 158 if (head != 0) {
159 DRM_ERROR("%s head not reset to zero " 159 DRM_DEBUG_KMS("%s head not reset to zero "
160 "ctl %08x head %08x tail %08x start %08x\n", 160 "ctl %08x head %08x tail %08x start %08x\n",
161 ring->name, 161 ring->name,
162 I915_READ_CTL(ring), 162 I915_READ_CTL(ring),
163 I915_READ_HEAD(ring), 163 I915_READ_HEAD(ring),
164 I915_READ_TAIL(ring), 164 I915_READ_TAIL(ring),
165 I915_READ_START(ring)); 165 I915_READ_START(ring));
166 166
167 I915_WRITE_HEAD(ring, 0); 167 I915_WRITE_HEAD(ring, 0);
168 168
169 DRM_ERROR("%s head forced to zero " 169 if (I915_READ_HEAD(ring) & HEAD_ADDR) {
170 "ctl %08x head %08x tail %08x start %08x\n", 170 DRM_ERROR("failed to set %s head to zero "
171 ring->name, 171 "ctl %08x head %08x tail %08x start %08x\n",
172 I915_READ_CTL(ring), 172 ring->name,
173 I915_READ_HEAD(ring), 173 I915_READ_CTL(ring),
174 I915_READ_TAIL(ring), 174 I915_READ_HEAD(ring),
175 I915_READ_START(ring)); 175 I915_READ_TAIL(ring),
176 I915_READ_START(ring));
177 }
176 } 178 }
177 179
178 I915_WRITE_CTL(ring, 180 I915_WRITE_CTL(ring,
179 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES) 181 ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
180 | RING_NO_REPORT | RING_VALID); 182 | RING_REPORT_64K | RING_VALID);
181 183
182 head = I915_READ_HEAD(ring) & HEAD_ADDR; 184 head = I915_READ_HEAD(ring) & HEAD_ADDR;
183 /* If the head is still not zero, the ring is dead */ 185 /* If the head is still not zero, the ring is dead */
@@ -654,6 +656,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
654 i915_gem_object_unpin(ring->gem_object); 656 i915_gem_object_unpin(ring->gem_object);
655 drm_gem_object_unreference(ring->gem_object); 657 drm_gem_object_unreference(ring->gem_object);
656 ring->gem_object = NULL; 658 ring->gem_object = NULL;
659
660 if (ring->cleanup)
661 ring->cleanup(ring);
662
657 cleanup_status_page(dev, ring); 663 cleanup_status_page(dev, ring);
658} 664}
659 665
@@ -688,6 +694,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
688{ 694{
689 unsigned long end; 695 unsigned long end;
690 drm_i915_private_t *dev_priv = dev->dev_private; 696 drm_i915_private_t *dev_priv = dev->dev_private;
697 u32 head;
698
699 head = intel_read_status_page(ring, 4);
700 if (head) {
701 ring->head = head & HEAD_ADDR;
702 ring->space = ring->head - (ring->tail + 8);
703 if (ring->space < 0)
704 ring->space += ring->size;
705 if (ring->space >= n)
706 return 0;
707 }
691 708
692 trace_i915_ring_wait_begin (dev); 709 trace_i915_ring_wait_begin (dev);
693 end = jiffies + 3 * HZ; 710 end = jiffies + 3 * HZ;
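
The early-out added to intel_wait_ring_buffer() pairs with the RING_REPORT_64K change in init_ring_common() above: the hardware now periodically autoreports its head pointer into the status page (dword 4 here), so a cheap memory read can often satisfy the wait without entering the 3-second MMIO polling loop. The ring-space arithmetic it uses:

#include <stdint.h>

/* Free bytes in a circular ring: [head, tail) holds commands the GPU
 * has not finished; the extra 8 bytes keep tail from ever catching
 * head exactly. */
static int ring_space(uint32_t head, uint32_t tail, uint32_t size)
{
	int space = (int)(head - (tail + 8));

	if (space < 0)
		space += (int)size;
	return space;
}
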
@@ -854,19 +871,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
854 /* do nothing */ 871 /* do nothing */
855} 872}
856 873
874
875/* Workaround for some steppings of SNB:
876 * each time the BLT engine's ring tail is moved,
877 * the first command parsed from the ring
878 * must be MI_BATCH_BUFFER_START.
879 */
880#define NEED_BLT_WORKAROUND(dev) \
881 (IS_GEN6(dev) && (dev->pdev->revision < 8))
882
883static inline struct drm_i915_gem_object *
884to_blt_workaround(struct intel_ring_buffer *ring)
885{
886 return ring->private;
887}
888
889static int blt_ring_init(struct drm_device *dev,
890 struct intel_ring_buffer *ring)
891{
892 if (NEED_BLT_WORKAROUND(dev)) {
893 struct drm_i915_gem_object *obj;
894 u32 __iomem *ptr;
895 int ret;
896
897 obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
898 if (obj == NULL)
899 return -ENOMEM;
900
901 ret = i915_gem_object_pin(&obj->base, 4096);
902 if (ret) {
903 drm_gem_object_unreference(&obj->base);
904 return ret;
905 }
906
907 ptr = kmap(obj->pages[0]);
908 iowrite32(MI_BATCH_BUFFER_END, ptr);
909 iowrite32(MI_NOOP, ptr+1);
910 kunmap(obj->pages[0]);
911
912 ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
913 if (ret) {
914 i915_gem_object_unpin(&obj->base);
915 drm_gem_object_unreference(&obj->base);
916 return ret;
917 }
918
919 ring->private = obj;
920 }
921
922 return init_ring_common(dev, ring);
923}
924
925static void blt_ring_begin(struct drm_device *dev,
926 struct intel_ring_buffer *ring,
927 int num_dwords)
928{
929 if (ring->private) {
930 intel_ring_begin(dev, ring, num_dwords+2);
931 intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
932 intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
933 } else
934 intel_ring_begin(dev, ring, 4);
935}
936
937static void blt_ring_flush(struct drm_device *dev,
938 struct intel_ring_buffer *ring,
939 u32 invalidate_domains,
940 u32 flush_domains)
941{
942 blt_ring_begin(dev, ring, 4);
943 intel_ring_emit(dev, ring, MI_FLUSH_DW);
944 intel_ring_emit(dev, ring, 0);
945 intel_ring_emit(dev, ring, 0);
946 intel_ring_emit(dev, ring, 0);
947 intel_ring_advance(dev, ring);
948}
949
950static u32
951blt_ring_add_request(struct drm_device *dev,
952 struct intel_ring_buffer *ring,
953 u32 flush_domains)
954{
955 u32 seqno = i915_gem_get_seqno(dev);
956
957 blt_ring_begin(dev, ring, 4);
958 intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
959 intel_ring_emit(dev, ring,
960 I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
961 intel_ring_emit(dev, ring, seqno);
962 intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
963 intel_ring_advance(dev, ring);
964
965 DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
966 return seqno;
967}
968
969static void blt_ring_cleanup(struct intel_ring_buffer *ring)
970{
971 if (!ring->private)
972 return;
973
974 i915_gem_object_unpin(ring->private);
975 drm_gem_object_unreference(ring->private);
976 ring->private = NULL;
977}
978
857static const struct intel_ring_buffer gen6_blt_ring = { 979static const struct intel_ring_buffer gen6_blt_ring = {
858 .name = "blt ring", 980 .name = "blt ring",
859 .id = RING_BLT, 981 .id = RING_BLT,
860 .mmio_base = BLT_RING_BASE, 982 .mmio_base = BLT_RING_BASE,
861 .size = 32 * PAGE_SIZE, 983 .size = 32 * PAGE_SIZE,
862 .init = init_ring_common, 984 .init = blt_ring_init,
863 .write_tail = ring_write_tail, 985 .write_tail = ring_write_tail,
864 .flush = gen6_ring_flush, 986 .flush = blt_ring_flush,
865 .add_request = ring_add_request, 987 .add_request = blt_ring_add_request,
866 .get_seqno = ring_status_page_get_seqno, 988 .get_seqno = ring_status_page_get_seqno,
867 .user_irq_get = blt_ring_get_user_irq, 989 .user_irq_get = blt_ring_get_user_irq,
868 .user_irq_put = blt_ring_put_user_irq, 990 .user_irq_put = blt_ring_put_user_irq,
869 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer, 991 .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
992 .cleanup = blt_ring_cleanup,
870}; 993};
871 994
872int intel_init_render_ring_buffer(struct drm_device *dev) 995int intel_init_render_ring_buffer(struct drm_device *dev)
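
The SNB BLT workaround above pins a page containing just MI_BATCH_BUFFER_END / MI_NOOP and makes every emission begin with an MI_BATCH_BUFFER_START into it, satisfying the stepping's "first parsed command" restriction; blt_ring_cleanup() unpins the page via the new ring->cleanup hook. Note that the non-workaround path of blt_ring_begin() hardcodes 4 dwords, which is only safe because both current callers (blt_ring_flush and blt_ring_add_request) emit exactly 4. The reservation shape, sketched with stubbed emit helpers and a stand-in opcode:

#include <stdint.h>
#include <stdbool.h>

static void reserve(int dwords) { (void)dwords; /* stub */ }
static void emit(uint32_t dw) { (void)dw; /* stub */ }

#define MI_BATCH_BUFFER_START 0x31u	/* opcode stand-in */

static void blt_begin(bool workaround, uint32_t dummy_batch_gtt, int ndwords)
{
	if (workaround) {
		/* Two extra dwords: jump into the dummy batch so the
		 * first command after a tail move is a batch start. */
		reserve(ndwords + 2);
		emit(MI_BATCH_BUFFER_START);
		emit(dummy_batch_gtt);
	} else {
		reserve(ndwords);
	}
}
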
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e5764..3126c2681983 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@ struct intel_ring_buffer {
63 struct drm_i915_gem_execbuffer2 *exec, 63 struct drm_i915_gem_execbuffer2 *exec,
64 struct drm_clip_rect *cliprects, 64 struct drm_clip_rect *cliprects,
65 uint64_t exec_offset); 65 uint64_t exec_offset);
66 void (*cleanup)(struct intel_ring_buffer *ring);
66 67
67 /** 68 /**
68 * List of objects currently involved in rendering from the 69 * List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct intel_ring_buffer {
98 99
99 wait_queue_head_t irq_queue; 100 wait_queue_head_t irq_queue;
100 drm_local_map_t map; 101 drm_local_map_t map;
102
103 void *private;
101}; 104};
102 105
103static inline u32 106static inline u32
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index de158b76bcd5..d97e6cb52d34 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -107,7 +107,8 @@ struct intel_sdvo {
107 * This is set if we treat the device as HDMI, instead of DVI. 107 * This is set if we treat the device as HDMI, instead of DVI.
108 */ 108 */
109 bool is_hdmi; 109 bool is_hdmi;
110 bool has_audio; 110 bool has_hdmi_monitor;
111 bool has_hdmi_audio;
111 112
112 /** 113 /**
113 * This is set if we detect output of sdvo device as LVDS and 114 * This is set if we detect output of sdvo device as LVDS and
@@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1023 if (!intel_sdvo_set_target_input(intel_sdvo)) 1024 if (!intel_sdvo_set_target_input(intel_sdvo))
1024 return; 1025 return;
1025 1026
1026 if (intel_sdvo->is_hdmi && 1027 if (intel_sdvo->has_hdmi_monitor &&
1027 !intel_sdvo_set_avi_infoframe(intel_sdvo)) 1028 !intel_sdvo_set_avi_infoframe(intel_sdvo))
1028 return; 1029 return;
1029 1030
@@ -1063,7 +1064,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
1063 } 1064 }
1064 if (intel_crtc->pipe == 1) 1065 if (intel_crtc->pipe == 1)
1065 sdvox |= SDVO_PIPE_B_SELECT; 1066 sdvox |= SDVO_PIPE_B_SELECT;
1066 if (intel_sdvo->has_audio) 1067 if (intel_sdvo->has_hdmi_audio)
1067 sdvox |= SDVO_AUDIO_ENABLE; 1068 sdvox |= SDVO_AUDIO_ENABLE;
1068 1069
1069 if (INTEL_INFO(dev)->gen >= 4) { 1070 if (INTEL_INFO(dev)->gen >= 4) {
@@ -1295,55 +1296,14 @@ intel_sdvo_get_edid(struct drm_connector *connector)
1295 return drm_get_edid(connector, &sdvo->ddc); 1296 return drm_get_edid(connector, &sdvo->ddc);
1296} 1297}
1297 1298
1298static struct drm_connector *
1299intel_find_analog_connector(struct drm_device *dev)
1300{
1301 struct drm_connector *connector;
1302 struct intel_sdvo *encoder;
1303
1304 list_for_each_entry(encoder,
1305 &dev->mode_config.encoder_list,
1306 base.base.head) {
1307 if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
1308 list_for_each_entry(connector,
1309 &dev->mode_config.connector_list,
1310 head) {
1311 if (&encoder->base ==
1312 intel_attached_encoder(connector))
1313 return connector;
1314 }
1315 }
1316 }
1317
1318 return NULL;
1319}
1320
1321static int
1322intel_analog_is_connected(struct drm_device *dev)
1323{
1324 struct drm_connector *analog_connector;
1325
1326 analog_connector = intel_find_analog_connector(dev);
1327 if (!analog_connector)
1328 return false;
1329
1330 if (analog_connector->funcs->detect(analog_connector, false) ==
1331 connector_status_disconnected)
1332 return false;
1333
1334 return true;
1335}
1336
1337/* Mac mini hack -- use the same DDC as the analog connector */ 1299/* Mac mini hack -- use the same DDC as the analog connector */
1338static struct edid * 1300static struct edid *
1339intel_sdvo_get_analog_edid(struct drm_connector *connector) 1301intel_sdvo_get_analog_edid(struct drm_connector *connector)
1340{ 1302{
1341 struct drm_i915_private *dev_priv = connector->dev->dev_private; 1303 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1342 1304
1343 if (!intel_analog_is_connected(connector->dev)) 1305 return drm_get_edid(connector,
1344 return NULL; 1306 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1345
1346 return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
1347} 1307}
1348 1308
1349enum drm_connector_status 1309enum drm_connector_status
@@ -1388,8 +1348,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1388 /* DDC bus is shared, match EDID to connector type */ 1348 /* DDC bus is shared, match EDID to connector type */
1389 if (edid->input & DRM_EDID_INPUT_DIGITAL) { 1349 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
1390 status = connector_status_connected; 1350 status = connector_status_connected;
1391 intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid); 1351 if (intel_sdvo->is_hdmi) {
1392 intel_sdvo->has_audio = drm_detect_monitor_audio(edid); 1352 intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
1353 intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
1354 }
1393 } 1355 }
1394 connector->display_info.raw_edid = NULL; 1356 connector->display_info.raw_edid = NULL;
1395 kfree(edid); 1357 kfree(edid);
@@ -1398,7 +1360,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
1398 if (status == connector_status_connected) { 1360 if (status == connector_status_connected) {
1399 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector); 1361 struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
1400 if (intel_sdvo_connector->force_audio) 1362 if (intel_sdvo_connector->force_audio)
1401 intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0; 1363 intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
1402 } 1364 }
1403 1365
1404 return status; 1366 return status;
@@ -1415,10 +1377,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
1415 if (!intel_sdvo_write_cmd(intel_sdvo, 1377 if (!intel_sdvo_write_cmd(intel_sdvo,
1416 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0)) 1378 SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
1417 return connector_status_unknown; 1379 return connector_status_unknown;
1418 if (intel_sdvo->is_tv) { 1380
1419 /* add 30ms delay when the output type is SDVO-TV */ 1381 /* add 30ms delay when the output type might be TV */
1382 if (intel_sdvo->caps.output_flags &
1383 (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
1420 mdelay(30); 1384 mdelay(30);
1421 } 1385
1422 if (!intel_sdvo_read_response(intel_sdvo, &response, 2)) 1386 if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
1423 return connector_status_unknown; 1387 return connector_status_unknown;
1424 1388
@@ -1472,8 +1436,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1472 edid = intel_sdvo_get_analog_edid(connector); 1436 edid = intel_sdvo_get_analog_edid(connector);
1473 1437
1474 if (edid != NULL) { 1438 if (edid != NULL) {
1475 drm_mode_connector_update_edid_property(connector, edid); 1439 if (edid->input & DRM_EDID_INPUT_DIGITAL) {
1476 drm_add_edid_modes(connector, edid); 1440 drm_mode_connector_update_edid_property(connector, edid);
1441 drm_add_edid_modes(connector, edid);
1442 }
1477 connector->display_info.raw_edid = NULL; 1443 connector->display_info.raw_edid = NULL;
1478 kfree(edid); 1444 kfree(edid);
1479 } 1445 }
@@ -1713,12 +1679,12 @@ intel_sdvo_set_property(struct drm_connector *connector,
1713 1679
1714 intel_sdvo_connector->force_audio = val; 1680 intel_sdvo_connector->force_audio = val;
1715 1681
1716 if (val > 0 && intel_sdvo->has_audio) 1682 if (val > 0 && intel_sdvo->has_hdmi_audio)
1717 return 0; 1683 return 0;
1718 if (val < 0 && !intel_sdvo->has_audio) 1684 if (val < 0 && !intel_sdvo->has_hdmi_audio)
1719 return 0; 1685 return 0;
1720 1686
1721 intel_sdvo->has_audio = val > 0; 1687 intel_sdvo->has_hdmi_audio = val > 0;
1722 goto done; 1688 goto done;
1723 } 1689 }
1724 1690
@@ -2070,6 +2036,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2070 intel_sdvo_set_colorimetry(intel_sdvo, 2036 intel_sdvo_set_colorimetry(intel_sdvo,
2071 SDVO_COLORIMETRY_RGB256); 2037 SDVO_COLORIMETRY_RGB256);
2072 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA; 2038 connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
2039
2040 intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
2073 intel_sdvo->is_hdmi = true; 2041 intel_sdvo->is_hdmi = true;
2074 } 2042 }
2075 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) | 2043 intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -2077,8 +2045,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
2077 2045
2078 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo); 2046 intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
2079 2047
2080 intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
2081
2082 return true; 2048 return true;
2083} 2049}
2084 2050
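
The sdvo rename is more than cosmetic: is_hdmi now records only the encoder's capability (set once at init), while has_hdmi_monitor and has_hdmi_audio track what the currently attached sink's EDID advertises. AVI infoframes and SDVO_AUDIO_ENABLE key off the live sink state, so a DVI monitor behind an HDMI-capable encoder no longer receives infoframes or audio. In outline, with stubs standing in for drm_detect_hdmi_monitor()/drm_detect_monitor_audio():

#include <stdbool.h>

struct edid;
struct sdvo_state {
	bool is_hdmi;		/* encoder capability, set at init */
	bool has_hdmi_monitor;	/* current sink is HDMI (from EDID) */
	bool has_hdmi_audio;	/* current sink advertises audio */
};

static bool detect_hdmi(const struct edid *e) { (void)e; return true; }
static bool detect_audio(const struct edid *e) { (void)e; return false; }

static void refresh_sink_state(struct sdvo_state *s, const struct edid *e)
{
	/* Live state is refreshed on every EDID read and is gated on
	 * the static capability, never the other way around. */
	s->has_hdmi_monitor = s->is_hdmi && detect_hdmi(e);
	s->has_hdmi_audio = s->is_hdmi && detect_audio(e);
}
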