author     Russell King <rmk@dyn-67.arm.linux.org.uk>      2009-03-13 17:44:51 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>      2009-03-13 17:44:51 -0400
commit     97fb44eb6bc01f4ffed4300e475aa15e44877375 (patch)
tree       481ed6efd0babe7185cae04f2fd295426b36411d  /drivers/gpu/drm/i915
parent     e4707dd3e9d0cb57597b6568a5e51fea5d6fca41 (diff)
parent     148854c65ea8046b045672fd49f4333aefaa3ab5 (diff)
Merge branch 'for-rmk' of git://git.pengutronix.de/git/imx/linux-2.6 into devel
Conflicts:
	arch/arm/mach-at91/gpio.c
Diffstat (limited to 'drivers/gpu/drm/i915')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c        |  60
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c        |  30
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h        |  11
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c        | 270
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c |  97
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c        |  18
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h        |  10
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c      |  14
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c   | 181
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h       |   1
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c        |   8
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c      |  51
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c      | 872
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo_regs.h | 404
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c        |   2
15 files changed, 1683 insertions(+), 346 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index ee64b7301f67..6dab63bdc4c1 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -202,7 +202,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
202 dev_priv->ring.map.flags = 0; 202 dev_priv->ring.map.flags = 0;
203 dev_priv->ring.map.mtrr = 0; 203 dev_priv->ring.map.mtrr = 0;
204 204
205 drm_core_ioremap(&dev_priv->ring.map, dev); 205 drm_core_ioremap_wc(&dev_priv->ring.map, dev);
206 206
207 if (dev_priv->ring.map.handle == NULL) { 207 if (dev_priv->ring.map.handle == NULL) {
208 i915_dma_cleanup(dev); 208 i915_dma_cleanup(dev);
@@ -731,8 +731,11 @@ static int i915_getparam(struct drm_device *dev, void *data,
731 case I915_PARAM_HAS_GEM: 731 case I915_PARAM_HAS_GEM:
732 value = dev_priv->has_gem; 732 value = dev_priv->has_gem;
733 break; 733 break;
734 case I915_PARAM_NUM_FENCES_AVAIL:
735 value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
736 break;
734 default: 737 default:
735 DRM_ERROR("Unknown parameter %d\n", param->param); 738 DRM_DEBUG("Unknown parameter %d\n", param->param);
736 return -EINVAL; 739 return -EINVAL;
737 } 740 }
738 741
@@ -764,8 +767,15 @@ static int i915_setparam(struct drm_device *dev, void *data,
764 case I915_SETPARAM_ALLOW_BATCHBUFFER: 767 case I915_SETPARAM_ALLOW_BATCHBUFFER:
765 dev_priv->allow_batchbuffer = param->value; 768 dev_priv->allow_batchbuffer = param->value;
766 break; 769 break;
770 case I915_SETPARAM_NUM_USED_FENCES:
771 if (param->value > dev_priv->num_fence_regs ||
772 param->value < 0)
773 return -EINVAL;
774 /* Userspace can use first N regs */
775 dev_priv->fence_reg_start = param->value;
776 break;
767 default: 777 default:
768 DRM_ERROR("unknown parameter %d\n", param->param); 778 DRM_DEBUG("unknown parameter %d\n", param->param);
769 return -EINVAL; 779 return -EINVAL;
770 } 780 }
771 781
@@ -801,7 +811,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
801 dev_priv->hws_map.flags = 0; 811 dev_priv->hws_map.flags = 0;
802 dev_priv->hws_map.mtrr = 0; 812 dev_priv->hws_map.mtrr = 0;
803 813
804 drm_core_ioremap(&dev_priv->hws_map, dev); 814 drm_core_ioremap_wc(&dev_priv->hws_map, dev);
805 if (dev_priv->hws_map.handle == NULL) { 815 if (dev_priv->hws_map.handle == NULL) {
806 i915_dma_cleanup(dev); 816 i915_dma_cleanup(dev);
807 dev_priv->status_gfx_addr = 0; 817 dev_priv->status_gfx_addr = 0;
@@ -966,10 +976,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
966 if (ret) 976 if (ret)
967 goto kfree_devname; 977 goto kfree_devname;
968 978
969 dev_priv->mm.gtt_mapping =
970 io_mapping_create_wc(dev->agp->base,
971 dev->agp->agp_info.aper_size * 1024*1024);
972
973 /* Allow hardware batchbuffers unless told otherwise. 979 /* Allow hardware batchbuffers unless told otherwise.
974 */ 980 */
975 dev_priv->allow_batchbuffer = 1; 981 dev_priv->allow_batchbuffer = 1;
@@ -1081,6 +1087,28 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1081 goto free_priv; 1087 goto free_priv;
1082 } 1088 }
1083 1089
1090 dev_priv->mm.gtt_mapping =
1091 io_mapping_create_wc(dev->agp->base,
1092 dev->agp->agp_info.aper_size * 1024*1024);
1093 if (dev_priv->mm.gtt_mapping == NULL) {
1094 ret = -EIO;
1095 goto out_rmmap;
1096 }
1097
1098 /* Set up a WC MTRR for non-PAT systems. This is more common than
1099 * one would think, because the kernel disables PAT on first
1100 * generation Core chips because WC PAT gets overridden by a UC
1101 * MTRR if present. Even if a UC MTRR isn't present.
1102 */
1103 dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
1104 dev->agp->agp_info.aper_size *
1105 1024 * 1024,
1106 MTRR_TYPE_WRCOMB, 1);
1107 if (dev_priv->mm.gtt_mtrr < 0) {
1108 DRM_INFO("MTRR allocation failed\n. Graphics "
1109 "performance may suffer.\n");
1110 }
1111
1084#ifdef CONFIG_HIGHMEM64G 1112#ifdef CONFIG_HIGHMEM64G
1085 /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */ 1113 /* don't enable GEM on PAE - needs agp + set_memory_* interface fixes */
1086 dev_priv->has_gem = 0; 1114 dev_priv->has_gem = 0;
@@ -1089,13 +1117,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1089 dev_priv->has_gem = 1; 1117 dev_priv->has_gem = 1;
1090#endif 1118#endif
1091 1119
1120 dev->driver->get_vblank_counter = i915_get_vblank_counter;
1121 if (IS_GM45(dev))
1122 dev->driver->get_vblank_counter = gm45_get_vblank_counter;
1123
1092 i915_gem_load(dev); 1124 i915_gem_load(dev);
1093 1125
1094 /* Init HWS */ 1126 /* Init HWS */
1095 if (!I915_NEED_GFX_HWS(dev)) { 1127 if (!I915_NEED_GFX_HWS(dev)) {
1096 ret = i915_init_phys_hws(dev); 1128 ret = i915_init_phys_hws(dev);
1097 if (ret != 0) 1129 if (ret != 0)
1098 goto out_rmmap; 1130 goto out_iomapfree;
1099 } 1131 }
1100 1132
1101 /* On the 945G/GM, the chipset reports the MSI capability on the 1133 /* On the 945G/GM, the chipset reports the MSI capability on the
@@ -1134,6 +1166,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1134 1166
1135 return 0; 1167 return 0;
1136 1168
1169out_iomapfree:
1170 io_mapping_free(dev_priv->mm.gtt_mapping);
1137out_rmmap: 1171out_rmmap:
1138 iounmap(dev_priv->regs); 1172 iounmap(dev_priv->regs);
1139free_priv: 1173free_priv:
@@ -1145,8 +1179,14 @@ int i915_driver_unload(struct drm_device *dev)
1145{ 1179{
1146 struct drm_i915_private *dev_priv = dev->dev_private; 1180 struct drm_i915_private *dev_priv = dev->dev_private;
1147 1181
1182 io_mapping_free(dev_priv->mm.gtt_mapping);
1183 if (dev_priv->mm.gtt_mtrr >= 0) {
1184 mtrr_del(dev_priv->mm.gtt_mtrr, dev->agp->base,
1185 dev->agp->agp_info.aper_size * 1024 * 1024);
1186 dev_priv->mm.gtt_mtrr = -1;
1187 }
1188
1148 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1189 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1149 io_mapping_free(dev_priv->mm.gtt_mapping);
1150 drm_irq_uninstall(dev); 1190 drm_irq_uninstall(dev);
1151 } 1191 }
1152 1192
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f8b3df0926c0..b293ef0bae71 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -27,6 +27,7 @@
27 * 27 *
28 */ 28 */
29 29
30#include <linux/device.h>
30#include "drmP.h" 31#include "drmP.h"
31#include "drm.h" 32#include "drm.h"
32#include "i915_drm.h" 33#include "i915_drm.h"
@@ -66,6 +67,14 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
66 67
67 i915_save_state(dev); 68 i915_save_state(dev);
68 69
70 /* If KMS is active, we do the leavevt stuff here */
71 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
72 if (i915_gem_idle(dev))
73 dev_err(&dev->pdev->dev,
74 "GEM idle failed, resume may fail\n");
75 drm_irq_uninstall(dev);
76 }
77
69 intel_opregion_free(dev); 78 intel_opregion_free(dev);
70 79
71 if (state.event == PM_EVENT_SUSPEND) { 80 if (state.event == PM_EVENT_SUSPEND) {
@@ -79,6 +88,9 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
79 88
80static int i915_resume(struct drm_device *dev) 89static int i915_resume(struct drm_device *dev)
81{ 90{
91 struct drm_i915_private *dev_priv = dev->dev_private;
92 int ret = 0;
93
82 pci_set_power_state(dev->pdev, PCI_D0); 94 pci_set_power_state(dev->pdev, PCI_D0);
83 pci_restore_state(dev->pdev); 95 pci_restore_state(dev->pdev);
84 if (pci_enable_device(dev->pdev)) 96 if (pci_enable_device(dev->pdev))
@@ -89,11 +101,26 @@ static int i915_resume(struct drm_device *dev)
89 101
90 intel_opregion_init(dev); 102 intel_opregion_init(dev);
91 103
92 return 0; 104 /* KMS EnterVT equivalent */
105 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
106 mutex_lock(&dev->struct_mutex);
107 dev_priv->mm.suspended = 0;
108
109 ret = i915_gem_init_ringbuffer(dev);
110 if (ret != 0)
111 ret = -1;
112 mutex_unlock(&dev->struct_mutex);
113
114 drm_irq_install(dev);
115 }
116
117 return ret;
93} 118}
94 119
95static struct vm_operations_struct i915_gem_vm_ops = { 120static struct vm_operations_struct i915_gem_vm_ops = {
96 .fault = i915_gem_fault, 121 .fault = i915_gem_fault,
122 .open = drm_gem_vm_open,
123 .close = drm_gem_vm_close,
97}; 124};
98 125
99static struct drm_driver driver = { 126static struct drm_driver driver = {
@@ -112,7 +139,6 @@ static struct drm_driver driver = {
112 .suspend = i915_suspend, 139 .suspend = i915_suspend,
113 .resume = i915_resume, 140 .resume = i915_resume,
114 .device_is_agp = i915_driver_device_is_agp, 141 .device_is_agp = i915_driver_device_is_agp,
115 .get_vblank_counter = i915_get_vblank_counter,
116 .enable_vblank = i915_enable_vblank, 142 .enable_vblank = i915_enable_vblank,
117 .disable_vblank = i915_disable_vblank, 143 .disable_vblank = i915_disable_vblank,
118 .irq_preinstall = i915_driver_irq_preinstall, 144 .irq_preinstall = i915_driver_irq_preinstall,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e13518252007..17fa40858d26 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -184,6 +184,8 @@ typedef struct drm_i915_private {
184 unsigned int lvds_dither:1; 184 unsigned int lvds_dither:1;
185 unsigned int lvds_vbt:1; 185 unsigned int lvds_vbt:1;
186 unsigned int int_crt_support:1; 186 unsigned int int_crt_support:1;
187 unsigned int lvds_use_ssc:1;
188 int lvds_ssc_freq;
187 189
188 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ 190 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
189 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 191 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
@@ -284,6 +286,7 @@ typedef struct drm_i915_private {
284 struct drm_mm gtt_space; 286 struct drm_mm gtt_space;
285 287
286 struct io_mapping *gtt_mapping; 288 struct io_mapping *gtt_mapping;
289 int gtt_mtrr;
287 290
288 /** 291 /**
289 * List of objects currently involved in rendering from the 292 * List of objects currently involved in rendering from the
@@ -534,6 +537,7 @@ extern int i915_vblank_pipe_get(struct drm_device *dev, void *data,
534extern int i915_enable_vblank(struct drm_device *dev, int crtc); 537extern int i915_enable_vblank(struct drm_device *dev, int crtc);
535extern void i915_disable_vblank(struct drm_device *dev, int crtc); 538extern void i915_disable_vblank(struct drm_device *dev, int crtc);
536extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc); 539extern u32 i915_get_vblank_counter(struct drm_device *dev, int crtc);
540extern u32 gm45_get_vblank_counter(struct drm_device *dev, int crtc);
537extern int i915_vblank_swap(struct drm_device *dev, void *data, 541extern int i915_vblank_swap(struct drm_device *dev, void *data,
538 struct drm_file *file_priv); 542 struct drm_file *file_priv);
539extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask); 543extern void i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask);
@@ -601,6 +605,7 @@ int i915_gem_init_object(struct drm_gem_object *obj);
601void i915_gem_free_object(struct drm_gem_object *obj); 605void i915_gem_free_object(struct drm_gem_object *obj);
602int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); 606int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
603void i915_gem_object_unpin(struct drm_gem_object *obj); 607void i915_gem_object_unpin(struct drm_gem_object *obj);
608int i915_gem_object_unbind(struct drm_gem_object *obj);
604void i915_gem_lastclose(struct drm_device *dev); 609void i915_gem_lastclose(struct drm_device *dev);
605uint32_t i915_get_gem_seqno(struct drm_device *dev); 610uint32_t i915_get_gem_seqno(struct drm_device *dev);
606void i915_gem_retire_requests(struct drm_device *dev); 611void i915_gem_retire_requests(struct drm_device *dev);
@@ -613,6 +618,7 @@ int i915_gem_init_ringbuffer(struct drm_device *dev);
613void i915_gem_cleanup_ringbuffer(struct drm_device *dev); 618void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
614int i915_gem_do_init(struct drm_device *dev, unsigned long start, 619int i915_gem_do_init(struct drm_device *dev, unsigned long start,
615 unsigned long end); 620 unsigned long end);
621int i915_gem_idle(struct drm_device *dev);
616int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf); 622int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
617int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, 623int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
618 int write); 624 int write);
@@ -784,6 +790,11 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
784 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev)) 790 IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev))
785 791
786#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev)) 792#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
793/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
794 * rows, which changed the alignment requirements and fence programming.
795 */
796#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
797 IS_I915GM(dev)))
787#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev)) 798#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev))
788 799
789#define PRIMARY_RINGBUFFER_SIZE (128*1024) 800#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index debad5c04cc0..85685bfd12da 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,10 +34,6 @@
34 34
35#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 35#define I915_GEM_GPU_DOMAINS (~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT))
36 36
37static void
38i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
39 uint32_t read_domains,
40 uint32_t write_domain);
41static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj); 37static void i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
42static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj); 38static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
43static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj); 39static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
@@ -52,7 +48,7 @@ static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
52static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); 48static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
53static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, 49static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
54 unsigned alignment); 50 unsigned alignment);
55static void i915_gem_object_get_fence_reg(struct drm_gem_object *obj); 51static int i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write);
56static void i915_gem_clear_fence_reg(struct drm_gem_object *obj); 52static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
57static int i915_gem_evict_something(struct drm_device *dev); 53static int i915_gem_evict_something(struct drm_device *dev);
58static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj, 54static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
@@ -567,6 +563,7 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
567 pgoff_t page_offset; 563 pgoff_t page_offset;
568 unsigned long pfn; 564 unsigned long pfn;
569 int ret = 0; 565 int ret = 0;
566 bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
570 567
571 /* We don't use vmf->pgoff since that has the fake offset */ 568 /* We don't use vmf->pgoff since that has the fake offset */
572 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >> 569 page_offset = ((unsigned long)vmf->virtual_address - vma->vm_start) >>
@@ -585,8 +582,13 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
585 582
586 /* Need a new fence register? */ 583 /* Need a new fence register? */
587 if (obj_priv->fence_reg == I915_FENCE_REG_NONE && 584 if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
588 obj_priv->tiling_mode != I915_TILING_NONE) 585 obj_priv->tiling_mode != I915_TILING_NONE) {
589 i915_gem_object_get_fence_reg(obj); 586 ret = i915_gem_object_get_fence_reg(obj, write);
587 if (ret) {
588 mutex_unlock(&dev->struct_mutex);
589 return VM_FAULT_SIGBUS;
590 }
591 }
590 592
591 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) + 593 pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
592 page_offset; 594 page_offset;
@@ -601,8 +603,6 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
601 case -EAGAIN: 603 case -EAGAIN:
602 return VM_FAULT_OOM; 604 return VM_FAULT_OOM;
603 case -EFAULT: 605 case -EFAULT:
604 case -EBUSY:
605 DRM_ERROR("can't insert pfn?? fault or busy...\n");
606 return VM_FAULT_SIGBUS; 606 return VM_FAULT_SIGBUS;
607 default: 607 default:
608 return VM_FAULT_NOPAGE; 608 return VM_FAULT_NOPAGE;
@@ -678,6 +678,30 @@ out_free_list:
678 return ret; 678 return ret;
679} 679}
680 680
681static void
682i915_gem_free_mmap_offset(struct drm_gem_object *obj)
683{
684 struct drm_device *dev = obj->dev;
685 struct drm_i915_gem_object *obj_priv = obj->driver_private;
686 struct drm_gem_mm *mm = dev->mm_private;
687 struct drm_map_list *list;
688
689 list = &obj->map_list;
690 drm_ht_remove_item(&mm->offset_hash, &list->hash);
691
692 if (list->file_offset_node) {
693 drm_mm_put_block(list->file_offset_node);
694 list->file_offset_node = NULL;
695 }
696
697 if (list->map) {
698 drm_free(list->map, sizeof(struct drm_map), DRM_MEM_DRIVER);
699 list->map = NULL;
700 }
701
702 obj_priv->mmap_offset = 0;
703}
704
681/** 705/**
682 * i915_gem_get_gtt_alignment - return required GTT alignment for an object 706 * i915_gem_get_gtt_alignment - return required GTT alignment for an object
683 * @obj: object to check 707 * @obj: object to check
@@ -752,8 +776,11 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
752 776
753 if (!obj_priv->mmap_offset) { 777 if (!obj_priv->mmap_offset) {
754 ret = i915_gem_create_mmap_offset(obj); 778 ret = i915_gem_create_mmap_offset(obj);
755 if (ret) 779 if (ret) {
780 drm_gem_object_unreference(obj);
781 mutex_unlock(&dev->struct_mutex);
756 return ret; 782 return ret;
783 }
757 } 784 }
758 785
759 args->offset = obj_priv->mmap_offset; 786 args->offset = obj_priv->mmap_offset;
@@ -1024,6 +1051,9 @@ i915_gem_retire_requests(struct drm_device *dev)
1024 drm_i915_private_t *dev_priv = dev->dev_private; 1051 drm_i915_private_t *dev_priv = dev->dev_private;
1025 uint32_t seqno; 1052 uint32_t seqno;
1026 1053
1054 if (!dev_priv->hw_status_page)
1055 return;
1056
1027 seqno = i915_get_gem_seqno(dev); 1057 seqno = i915_get_gem_seqno(dev);
1028 1058
1029 while (!list_empty(&dev_priv->mm.request_list)) { 1059 while (!list_empty(&dev_priv->mm.request_list)) {
@@ -1211,7 +1241,7 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj)
1211/** 1241/**
1212 * Unbinds an object from the GTT aperture. 1242 * Unbinds an object from the GTT aperture.
1213 */ 1243 */
1214static int 1244int
1215i915_gem_object_unbind(struct drm_gem_object *obj) 1245i915_gem_object_unbind(struct drm_gem_object *obj)
1216{ 1246{
1217 struct drm_device *dev = obj->dev; 1247 struct drm_device *dev = obj->dev;
@@ -1445,21 +1475,26 @@ static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
1445 drm_i915_private_t *dev_priv = dev->dev_private; 1475 drm_i915_private_t *dev_priv = dev->dev_private;
1446 struct drm_i915_gem_object *obj_priv = obj->driver_private; 1476 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1447 int regnum = obj_priv->fence_reg; 1477 int regnum = obj_priv->fence_reg;
1478 int tile_width;
1448 uint32_t val; 1479 uint32_t val;
1449 uint32_t pitch_val; 1480 uint32_t pitch_val;
1450 1481
1451 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || 1482 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1452 (obj_priv->gtt_offset & (obj->size - 1))) { 1483 (obj_priv->gtt_offset & (obj->size - 1))) {
1453 WARN(1, "%s: object not 1M or size aligned\n", __func__); 1484 WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
1485 __func__, obj_priv->gtt_offset, obj->size);
1454 return; 1486 return;
1455 } 1487 }
1456 1488
1457 if (obj_priv->tiling_mode == I915_TILING_Y && (IS_I945G(dev) || 1489 if (obj_priv->tiling_mode == I915_TILING_Y &&
1458 IS_I945GM(dev) || 1490 HAS_128_BYTE_Y_TILING(dev))
1459 IS_G33(dev))) 1491 tile_width = 128;
1460 pitch_val = (obj_priv->stride / 128) - 1;
1461 else 1492 else
1462 pitch_val = (obj_priv->stride / 512) - 1; 1493 tile_width = 512;
1494
1495 /* Note: pitch better be a power of two tile widths */
1496 pitch_val = obj_priv->stride / tile_width;
1497 pitch_val = ffs(pitch_val) - 1;
1463 1498
1464 val = obj_priv->gtt_offset; 1499 val = obj_priv->gtt_offset;
1465 if (obj_priv->tiling_mode == I915_TILING_Y) 1500 if (obj_priv->tiling_mode == I915_TILING_Y)
@@ -1483,7 +1518,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
1483 1518
1484 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) || 1519 if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
1485 (obj_priv->gtt_offset & (obj->size - 1))) { 1520 (obj_priv->gtt_offset & (obj->size - 1))) {
1486 WARN(1, "%s: object not 1M or size aligned\n", __func__); 1521 WARN(1, "%s: object 0x%08x not 1M or size aligned\n",
1522 __func__, obj_priv->gtt_offset);
1487 return; 1523 return;
1488 } 1524 }
1489 1525
@@ -1503,6 +1539,7 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
1503/** 1539/**
1504 * i915_gem_object_get_fence_reg - set up a fence reg for an object 1540 * i915_gem_object_get_fence_reg - set up a fence reg for an object
1505 * @obj: object to map through a fence reg 1541 * @obj: object to map through a fence reg
1542 * @write: object is about to be written
1506 * 1543 *
1507 * When mapping objects through the GTT, userspace wants to be able to write 1544 * When mapping objects through the GTT, userspace wants to be able to write
1508 * to them without having to worry about swizzling if the object is tiled. 1545 * to them without having to worry about swizzling if the object is tiled.
@@ -1513,8 +1550,8 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
1513 * It then sets up the reg based on the object's properties: address, pitch 1550 * It then sets up the reg based on the object's properties: address, pitch
1514 * and tiling format. 1551 * and tiling format.
1515 */ 1552 */
1516static void 1553static int
1517i915_gem_object_get_fence_reg(struct drm_gem_object *obj) 1554i915_gem_object_get_fence_reg(struct drm_gem_object *obj, bool write)
1518{ 1555{
1519 struct drm_device *dev = obj->dev; 1556 struct drm_device *dev = obj->dev;
1520 struct drm_i915_private *dev_priv = dev->dev_private; 1557 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1527,12 +1564,18 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj)
1527 WARN(1, "allocating a fence for non-tiled object?\n"); 1564 WARN(1, "allocating a fence for non-tiled object?\n");
1528 break; 1565 break;
1529 case I915_TILING_X: 1566 case I915_TILING_X:
1530 WARN(obj_priv->stride & (512 - 1), 1567 if (!obj_priv->stride)
1531 "object is X tiled but has non-512B pitch\n"); 1568 return -EINVAL;
1569 WARN((obj_priv->stride & (512 - 1)),
1570 "object 0x%08x is X tiled but has non-512B pitch\n",
1571 obj_priv->gtt_offset);
1532 break; 1572 break;
1533 case I915_TILING_Y: 1573 case I915_TILING_Y:
1534 WARN(obj_priv->stride & (128 - 1), 1574 if (!obj_priv->stride)
1535 "object is Y tiled but has non-128B pitch\n"); 1575 return -EINVAL;
1576 WARN((obj_priv->stride & (128 - 1)),
1577 "object 0x%08x is Y tiled but has non-128B pitch\n",
1578 obj_priv->gtt_offset);
1536 break; 1579 break;
1537 } 1580 }
1538 1581
@@ -1563,10 +1606,11 @@ try_again:
1563 * objects to finish before trying again. 1606 * objects to finish before trying again.
1564 */ 1607 */
1565 if (i == dev_priv->num_fence_regs) { 1608 if (i == dev_priv->num_fence_regs) {
1566 ret = i915_gem_object_wait_rendering(reg->obj); 1609 ret = i915_gem_object_set_to_gtt_domain(reg->obj, 0);
1567 if (ret) { 1610 if (ret) {
1568 WARN(ret, "wait_rendering failed: %d\n", ret); 1611 WARN(ret != -ERESTARTSYS,
1569 return; 1612 "switch to GTT domain failed: %d\n", ret);
1613 return ret;
1570 } 1614 }
1571 goto try_again; 1615 goto try_again;
1572 } 1616 }
@@ -1591,6 +1635,8 @@ try_again:
1591 i915_write_fence_reg(reg); 1635 i915_write_fence_reg(reg);
1592 else 1636 else
1593 i830_write_fence_reg(reg); 1637 i830_write_fence_reg(reg);
1638
1639 return 0;
1594} 1640}
1595 1641
1596/** 1642/**
@@ -1631,7 +1677,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
1631 if (dev_priv->mm.suspended) 1677 if (dev_priv->mm.suspended)
1632 return -EBUSY; 1678 return -EBUSY;
1633 if (alignment == 0) 1679 if (alignment == 0)
1634 alignment = PAGE_SIZE; 1680 alignment = i915_gem_get_gtt_alignment(obj);
1635 if (alignment & (PAGE_SIZE - 1)) { 1681 if (alignment & (PAGE_SIZE - 1)) {
1636 DRM_ERROR("Invalid object alignment requested %u\n", alignment); 1682 DRM_ERROR("Invalid object alignment requested %u\n", alignment);
1637 return -EINVAL; 1683 return -EINVAL;
@@ -1974,30 +2020,28 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
1974 * drm_agp_chipset_flush 2020 * drm_agp_chipset_flush
1975 */ 2021 */
1976static void 2022static void
1977i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj, 2023i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj)
1978 uint32_t read_domains,
1979 uint32_t write_domain)
1980{ 2024{
1981 struct drm_device *dev = obj->dev; 2025 struct drm_device *dev = obj->dev;
1982 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2026 struct drm_i915_gem_object *obj_priv = obj->driver_private;
1983 uint32_t invalidate_domains = 0; 2027 uint32_t invalidate_domains = 0;
1984 uint32_t flush_domains = 0; 2028 uint32_t flush_domains = 0;
1985 2029
1986 BUG_ON(read_domains & I915_GEM_DOMAIN_CPU); 2030 BUG_ON(obj->pending_read_domains & I915_GEM_DOMAIN_CPU);
1987 BUG_ON(write_domain == I915_GEM_DOMAIN_CPU); 2031 BUG_ON(obj->pending_write_domain == I915_GEM_DOMAIN_CPU);
1988 2032
1989#if WATCH_BUF 2033#if WATCH_BUF
1990 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", 2034 DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
1991 __func__, obj, 2035 __func__, obj,
1992 obj->read_domains, read_domains, 2036 obj->read_domains, obj->pending_read_domains,
1993 obj->write_domain, write_domain); 2037 obj->write_domain, obj->pending_write_domain);
1994#endif 2038#endif
1995 /* 2039 /*
1996 * If the object isn't moving to a new write domain, 2040 * If the object isn't moving to a new write domain,
1997 * let the object stay in multiple read domains 2041 * let the object stay in multiple read domains
1998 */ 2042 */
1999 if (write_domain == 0) 2043 if (obj->pending_write_domain == 0)
2000 read_domains |= obj->read_domains; 2044 obj->pending_read_domains |= obj->read_domains;
2001 else 2045 else
2002 obj_priv->dirty = 1; 2046 obj_priv->dirty = 1;
2003 2047
@@ -2007,15 +2051,17 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
2007 * any read domains which differ from the old 2051 * any read domains which differ from the old
2008 * write domain 2052 * write domain
2009 */ 2053 */
2010 if (obj->write_domain && obj->write_domain != read_domains) { 2054 if (obj->write_domain &&
2055 obj->write_domain != obj->pending_read_domains) {
2011 flush_domains |= obj->write_domain; 2056 flush_domains |= obj->write_domain;
2012 invalidate_domains |= read_domains & ~obj->write_domain; 2057 invalidate_domains |=
2058 obj->pending_read_domains & ~obj->write_domain;
2013 } 2059 }
2014 /* 2060 /*
2015 * Invalidate any read caches which may have 2061 * Invalidate any read caches which may have
2016 * stale data. That is, any new read domains. 2062 * stale data. That is, any new read domains.
2017 */ 2063 */
2018 invalidate_domains |= read_domains & ~obj->read_domains; 2064 invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
2019 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { 2065 if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
2020#if WATCH_BUF 2066#if WATCH_BUF
2021 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", 2067 DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
@@ -2024,9 +2070,15 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
2024 i915_gem_clflush_object(obj); 2070 i915_gem_clflush_object(obj);
2025 } 2071 }
2026 2072
2027 if ((write_domain | flush_domains) != 0) 2073 /* The actual obj->write_domain will be updated with
2028 obj->write_domain = write_domain; 2074 * pending_write_domain after we emit the accumulated flush for all
2029 obj->read_domains = read_domains; 2075 * of our domain changes in execbuffers (which clears objects'
2076 * write_domains). So if we have a current write domain that we
2077 * aren't changing, set pending_write_domain to that.
2078 */
2079 if (flush_domains == 0 && obj->pending_write_domain == 0)
2080 obj->pending_write_domain = obj->write_domain;
2081 obj->read_domains = obj->pending_read_domains;
2030 2082
2031 dev->invalidate_domains |= invalidate_domains; 2083 dev->invalidate_domains |= invalidate_domains;
2032 dev->flush_domains |= flush_domains; 2084 dev->flush_domains |= flush_domains;
@@ -2229,6 +2281,8 @@ i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
2229 (int) reloc.offset, 2281 (int) reloc.offset,
2230 reloc.read_domains, 2282 reloc.read_domains,
2231 reloc.write_domain); 2283 reloc.write_domain);
2284 drm_gem_object_unreference(target_obj);
2285 i915_gem_object_unpin(obj);
2232 return -EINVAL; 2286 return -EINVAL;
2233 } 2287 }
2234 2288
@@ -2458,13 +2512,15 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2458 if (dev_priv->mm.wedged) { 2512 if (dev_priv->mm.wedged) {
2459 DRM_ERROR("Execbuf while wedged\n"); 2513 DRM_ERROR("Execbuf while wedged\n");
2460 mutex_unlock(&dev->struct_mutex); 2514 mutex_unlock(&dev->struct_mutex);
2461 return -EIO; 2515 ret = -EIO;
2516 goto pre_mutex_err;
2462 } 2517 }
2463 2518
2464 if (dev_priv->mm.suspended) { 2519 if (dev_priv->mm.suspended) {
2465 DRM_ERROR("Execbuf while VT-switched.\n"); 2520 DRM_ERROR("Execbuf while VT-switched.\n");
2466 mutex_unlock(&dev->struct_mutex); 2521 mutex_unlock(&dev->struct_mutex);
2467 return -EBUSY; 2522 ret = -EBUSY;
2523 goto pre_mutex_err;
2468 } 2524 }
2469 2525
2470 /* Look up object handles */ 2526 /* Look up object handles */
@@ -2532,9 +2588,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2532 struct drm_gem_object *obj = object_list[i]; 2588 struct drm_gem_object *obj = object_list[i];
2533 2589
2534 /* Compute new gpu domains and update invalidate/flush */ 2590 /* Compute new gpu domains and update invalidate/flush */
2535 i915_gem_object_set_to_gpu_domain(obj, 2591 i915_gem_object_set_to_gpu_domain(obj);
2536 obj->pending_read_domains,
2537 obj->pending_write_domain);
2538 } 2592 }
2539 2593
2540 i915_verify_inactive(dev, __FILE__, __LINE__); 2594 i915_verify_inactive(dev, __FILE__, __LINE__);
@@ -2553,6 +2607,12 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2553 (void)i915_add_request(dev, dev->flush_domains); 2607 (void)i915_add_request(dev, dev->flush_domains);
2554 } 2608 }
2555 2609
2610 for (i = 0; i < args->buffer_count; i++) {
2611 struct drm_gem_object *obj = object_list[i];
2612
2613 obj->write_domain = obj->pending_write_domain;
2614 }
2615
2556 i915_verify_inactive(dev, __FILE__, __LINE__); 2616 i915_verify_inactive(dev, __FILE__, __LINE__);
2557 2617
2558#if WATCH_COHERENCY 2618#if WATCH_COHERENCY
@@ -2610,15 +2670,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
2610 2670
2611 i915_verify_inactive(dev, __FILE__, __LINE__); 2671 i915_verify_inactive(dev, __FILE__, __LINE__);
2612 2672
2613 /* Copy the new buffer offsets back to the user's exec list. */
2614 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
2615 (uintptr_t) args->buffers_ptr,
2616 exec_list,
2617 sizeof(*exec_list) * args->buffer_count);
2618 if (ret)
2619 DRM_ERROR("failed to copy %d exec entries "
2620 "back to user (%d)\n",
2621 args->buffer_count, ret);
2622err: 2673err:
2623 for (i = 0; i < pinned; i++) 2674 for (i = 0; i < pinned; i++)
2624 i915_gem_object_unpin(object_list[i]); 2675 i915_gem_object_unpin(object_list[i]);
@@ -2628,6 +2679,18 @@ err:
2628 2679
2629 mutex_unlock(&dev->struct_mutex); 2680 mutex_unlock(&dev->struct_mutex);
2630 2681
2682 if (!ret) {
2683 /* Copy the new buffer offsets back to the user's exec list. */
2684 ret = copy_to_user((struct drm_i915_relocation_entry __user *)
2685 (uintptr_t) args->buffers_ptr,
2686 exec_list,
2687 sizeof(*exec_list) * args->buffer_count);
2688 if (ret)
2689 DRM_ERROR("failed to copy %d exec entries "
2690 "back to user (%d)\n",
2691 args->buffer_count, ret);
2692 }
2693
2631pre_mutex_err: 2694pre_mutex_err:
2632 drm_free(object_list, sizeof(*object_list) * args->buffer_count, 2695 drm_free(object_list, sizeof(*object_list) * args->buffer_count,
2633 DRM_MEM_DRIVER); 2696 DRM_MEM_DRIVER);
@@ -2652,6 +2715,14 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
2652 DRM_ERROR("Failure to bind: %d", ret); 2715 DRM_ERROR("Failure to bind: %d", ret);
2653 return ret; 2716 return ret;
2654 } 2717 }
2718 /*
2719 * Pre-965 chips need a fence register set up in order to
2720 * properly handle tiled surfaces.
2721 */
2722 if (!IS_I965G(dev) &&
2723 obj_priv->fence_reg == I915_FENCE_REG_NONE &&
2724 obj_priv->tiling_mode != I915_TILING_NONE)
2725 i915_gem_object_get_fence_reg(obj, true);
2655 } 2726 }
2656 obj_priv->pin_count++; 2727 obj_priv->pin_count++;
2657 2728
@@ -2723,6 +2794,7 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
2723 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) { 2794 if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
2724 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n", 2795 DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
2725 args->handle); 2796 args->handle);
2797 drm_gem_object_unreference(obj);
2726 mutex_unlock(&dev->struct_mutex); 2798 mutex_unlock(&dev->struct_mutex);
2727 return -EINVAL; 2799 return -EINVAL;
2728 } 2800 }
@@ -2803,6 +2875,13 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
2803 return -EBADF; 2875 return -EBADF;
2804 } 2876 }
2805 2877
2878 /* Update the active list for the hardware's current position.
2879 * Otherwise this only updates on a delayed timer or when irqs are
2880 * actually unmasked, and our working set ends up being larger than
2881 * required.
2882 */
2883 i915_gem_retire_requests(dev);
2884
2806 obj_priv = obj->driver_private; 2885 obj_priv = obj->driver_private;
2807 /* Don't count being on the flushing list against the object being 2886 /* Don't count being on the flushing list against the object being
2808 * done. Otherwise, a buffer left on the flushing list but not getting 2887 * done. Otherwise, a buffer left on the flushing list but not getting
@@ -2855,9 +2934,6 @@ int i915_gem_init_object(struct drm_gem_object *obj)
2855void i915_gem_free_object(struct drm_gem_object *obj) 2934void i915_gem_free_object(struct drm_gem_object *obj)
2856{ 2935{
2857 struct drm_device *dev = obj->dev; 2936 struct drm_device *dev = obj->dev;
2858 struct drm_gem_mm *mm = dev->mm_private;
2859 struct drm_map_list *list;
2860 struct drm_map *map;
2861 struct drm_i915_gem_object *obj_priv = obj->driver_private; 2937 struct drm_i915_gem_object *obj_priv = obj->driver_private;
2862 2938
2863 while (obj_priv->pin_count > 0) 2939 while (obj_priv->pin_count > 0)
@@ -2868,19 +2944,7 @@ void i915_gem_free_object(struct drm_gem_object *obj)
2868 2944
2869 i915_gem_object_unbind(obj); 2945 i915_gem_object_unbind(obj);
2870 2946
2871 list = &obj->map_list; 2947 i915_gem_free_mmap_offset(obj);
2872 drm_ht_remove_item(&mm->offset_hash, &list->hash);
2873
2874 if (list->file_offset_node) {
2875 drm_mm_put_block(list->file_offset_node);
2876 list->file_offset_node = NULL;
2877 }
2878
2879 map = list->map;
2880 if (map) {
2881 drm_free(map, sizeof(*map), DRM_MEM_DRIVER);
2882 list->map = NULL;
2883 }
2884 2948
2885 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); 2949 drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
2886 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); 2950 drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
@@ -2919,7 +2983,7 @@ i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
2919 return 0; 2983 return 0;
2920} 2984}
2921 2985
2922static int 2986int
2923i915_gem_idle(struct drm_device *dev) 2987i915_gem_idle(struct drm_device *dev)
2924{ 2988{
2925 drm_i915_private_t *dev_priv = dev->dev_private; 2989 drm_i915_private_t *dev_priv = dev->dev_private;
@@ -3065,6 +3129,7 @@ i915_gem_init_hws(struct drm_device *dev)
3065 if (dev_priv->hw_status_page == NULL) { 3129 if (dev_priv->hw_status_page == NULL) {
3066 DRM_ERROR("Failed to map status page.\n"); 3130 DRM_ERROR("Failed to map status page.\n");
3067 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); 3131 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3132 i915_gem_object_unpin(obj);
3068 drm_gem_object_unreference(obj); 3133 drm_gem_object_unreference(obj);
3069 return -EINVAL; 3134 return -EINVAL;
3070 } 3135 }
@@ -3077,6 +3142,31 @@ i915_gem_init_hws(struct drm_device *dev)
3077 return 0; 3142 return 0;
3078} 3143}
3079 3144
3145static void
3146i915_gem_cleanup_hws(struct drm_device *dev)
3147{
3148 drm_i915_private_t *dev_priv = dev->dev_private;
3149 struct drm_gem_object *obj;
3150 struct drm_i915_gem_object *obj_priv;
3151
3152 if (dev_priv->hws_obj == NULL)
3153 return;
3154
3155 obj = dev_priv->hws_obj;
3156 obj_priv = obj->driver_private;
3157
3158 kunmap(obj_priv->page_list[0]);
3159 i915_gem_object_unpin(obj);
3160 drm_gem_object_unreference(obj);
3161 dev_priv->hws_obj = NULL;
3162
3163 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3164 dev_priv->hw_status_page = NULL;
3165
3166 /* Write high address into HWS_PGA when disabling. */
3167 I915_WRITE(HWS_PGA, 0x1ffff000);
3168}
3169
3080int 3170int
3081i915_gem_init_ringbuffer(struct drm_device *dev) 3171i915_gem_init_ringbuffer(struct drm_device *dev)
3082{ 3172{
@@ -3094,6 +3184,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
3094 obj = drm_gem_object_alloc(dev, 128 * 1024); 3184 obj = drm_gem_object_alloc(dev, 128 * 1024);
3095 if (obj == NULL) { 3185 if (obj == NULL) {
3096 DRM_ERROR("Failed to allocate ringbuffer\n"); 3186 DRM_ERROR("Failed to allocate ringbuffer\n");
3187 i915_gem_cleanup_hws(dev);
3097 return -ENOMEM; 3188 return -ENOMEM;
3098 } 3189 }
3099 obj_priv = obj->driver_private; 3190 obj_priv = obj->driver_private;
@@ -3101,6 +3192,7 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
3101 ret = i915_gem_object_pin(obj, 4096); 3192 ret = i915_gem_object_pin(obj, 4096);
3102 if (ret != 0) { 3193 if (ret != 0) {
3103 drm_gem_object_unreference(obj); 3194 drm_gem_object_unreference(obj);
3195 i915_gem_cleanup_hws(dev);
3104 return ret; 3196 return ret;
3105 } 3197 }
3106 3198
@@ -3118,7 +3210,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
3118 if (ring->map.handle == NULL) { 3210 if (ring->map.handle == NULL) {
3119 DRM_ERROR("Failed to map ringbuffer.\n"); 3211 DRM_ERROR("Failed to map ringbuffer.\n");
3120 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); 3212 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3213 i915_gem_object_unpin(obj);
3121 drm_gem_object_unreference(obj); 3214 drm_gem_object_unreference(obj);
3215 i915_gem_cleanup_hws(dev);
3122 return -EINVAL; 3216 return -EINVAL;
3123 } 3217 }
3124 ring->ring_obj = obj; 3218 ring->ring_obj = obj;
@@ -3198,20 +3292,7 @@ i915_gem_cleanup_ringbuffer(struct drm_device *dev)
3198 dev_priv->ring.ring_obj = NULL; 3292 dev_priv->ring.ring_obj = NULL;
3199 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); 3293 memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
3200 3294
3201 if (dev_priv->hws_obj != NULL) { 3295 i915_gem_cleanup_hws(dev);
3202 struct drm_gem_object *obj = dev_priv->hws_obj;
3203 struct drm_i915_gem_object *obj_priv = obj->driver_private;
3204
3205 kunmap(obj_priv->page_list[0]);
3206 i915_gem_object_unpin(obj);
3207 drm_gem_object_unreference(obj);
3208 dev_priv->hws_obj = NULL;
3209 memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
3210 dev_priv->hw_status_page = NULL;
3211
3212 /* Write high address into HWS_PGA when disabling. */
3213 I915_WRITE(HWS_PGA, 0x1ffff000);
3214 }
3215} 3296}
3216 3297
3217int 3298int
@@ -3229,10 +3310,6 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
3229 dev_priv->mm.wedged = 0; 3310 dev_priv->mm.wedged = 0;
3230 } 3311 }
3231 3312
3232 dev_priv->mm.gtt_mapping = io_mapping_create_wc(dev->agp->base,
3233 dev->agp->agp_info.aper_size
3234 * 1024 * 1024);
3235
3236 mutex_lock(&dev->struct_mutex); 3313 mutex_lock(&dev->struct_mutex);
3237 dev_priv->mm.suspended = 0; 3314 dev_priv->mm.suspended = 0;
3238 3315
@@ -3255,7 +3332,6 @@ int
3255i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, 3332i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3256 struct drm_file *file_priv) 3333 struct drm_file *file_priv)
3257{ 3334{
3258 drm_i915_private_t *dev_priv = dev->dev_private;
3259 int ret; 3335 int ret;
3260 3336
3261 if (drm_core_check_feature(dev, DRIVER_MODESET)) 3337 if (drm_core_check_feature(dev, DRIVER_MODESET))
@@ -3264,7 +3340,6 @@ i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
3264 ret = i915_gem_idle(dev); 3340 ret = i915_gem_idle(dev);
3265 drm_irq_uninstall(dev); 3341 drm_irq_uninstall(dev);
3266 3342
3267 io_mapping_free(dev_priv->mm.gtt_mapping);
3268 return ret; 3343 return ret;
3269} 3344}
3270 3345
@@ -3273,6 +3348,9 @@ i915_gem_lastclose(struct drm_device *dev)
3273{ 3348{
3274 int ret; 3349 int ret;
3275 3350
3351 if (drm_core_check_feature(dev, DRIVER_MODESET))
3352 return;
3353
3276 ret = i915_gem_idle(dev); 3354 ret = i915_gem_idle(dev);
3277 if (ret) 3355 if (ret)
3278 DRM_ERROR("failed to idle hardware: %d\n", ret); 3356 DRM_ERROR("failed to idle hardware: %d\n", ret);
@@ -3294,7 +3372,7 @@ i915_gem_load(struct drm_device *dev)
3294 /* Old X drivers will take 0-2 for front, back, depth buffers */ 3372 /* Old X drivers will take 0-2 for front, back, depth buffers */
3295 dev_priv->fence_reg_start = 3; 3373 dev_priv->fence_reg_start = 3;
3296 3374
3297 if (IS_I965G(dev)) 3375 if (IS_I965G(dev) || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
3298 dev_priv->num_fence_regs = 16; 3376 dev_priv->num_fence_regs = 16;
3299 else 3377 else
3300 dev_priv->num_fence_regs = 8; 3378 dev_priv->num_fence_regs = 8;
@@ -3470,7 +3548,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
3470 user_data = (char __user *) (uintptr_t) args->data_ptr; 3548 user_data = (char __user *) (uintptr_t) args->data_ptr;
3471 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset; 3549 obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
3472 3550
3473 DRM_ERROR("obj_addr %p, %lld\n", obj_addr, args->size); 3551 DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
3474 ret = copy_from_user(obj_addr, user_data, args->size); 3552 ret = copy_from_user(obj_addr, user_data, args->size);
3475 if (ret) 3553 if (ret)
3476 return -EFAULT; 3554 return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 241f39b7f460..7fb4191ef934 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -173,6 +173,73 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
173 dev_priv->mm.bit_6_swizzle_y = swizzle_y; 173 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
174} 174}
175 175
176
177/**
178 * Returns the size of the fence for a tiled object of the given size.
179 */
180static int
181i915_get_fence_size(struct drm_device *dev, int size)
182{
183 int i;
184 int start;
185
186 if (IS_I965G(dev)) {
187 /* The 965 can have fences at any page boundary. */
188 return ALIGN(size, 4096);
189 } else {
190 /* Align the size to a power of two greater than the smallest
191 * fence size.
192 */
193 if (IS_I9XX(dev))
194 start = 1024 * 1024;
195 else
196 start = 512 * 1024;
197
198 for (i = start; i < size; i <<= 1)
199 ;
200
201 return i;
202 }
203}
204
205/* Check pitch constriants for all chips & tiling formats */
206static bool
207i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
208{
209 int tile_width;
210
211 /* Linear is always fine */
212 if (tiling_mode == I915_TILING_NONE)
213 return true;
214
215 if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
216 tile_width = 128;
217 else
218 tile_width = 512;
219
220 /* 965+ just needs multiples of tile width */
221 if (IS_I965G(dev)) {
222 if (stride & (tile_width - 1))
223 return false;
224 return true;
225 }
226
227 /* Pre-965 needs power of two tile widths */
228 if (stride < tile_width)
229 return false;
230
231 if (stride & (stride - 1))
232 return false;
233
234 /* We don't handle the aperture area covered by the fence being bigger
235 * than the object size.
236 */
237 if (i915_get_fence_size(dev, size) != size)
238 return false;
239
240 return true;
241}
242
176/** 243/**
177 * Sets the tiling mode of an object, returning the required swizzling of 244 * Sets the tiling mode of an object, returning the required swizzling of
178 * bit 6 of addresses in the object. 245 * bit 6 of addresses in the object.
@@ -191,6 +258,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
191 return -EINVAL; 258 return -EINVAL;
192 obj_priv = obj->driver_private; 259 obj_priv = obj->driver_private;
193 260
261 if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
262 drm_gem_object_unreference(obj);
263 return -EINVAL;
264 }
265
194 mutex_lock(&dev->struct_mutex); 266 mutex_lock(&dev->struct_mutex);
195 267
196 if (args->tiling_mode == I915_TILING_NONE) { 268 if (args->tiling_mode == I915_TILING_NONE) {
@@ -207,12 +279,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
207 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; 279 args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
208 } 280 }
209 } 281 }
210 obj_priv->tiling_mode = args->tiling_mode; 282 if (args->tiling_mode != obj_priv->tiling_mode) {
211 obj_priv->stride = args->stride; 283 int ret;
212 284
213 mutex_unlock(&dev->struct_mutex); 285 /* Unbind the object, as switching tiling means we're
286 * switching the cache organization due to fencing, probably.
287 */
288 ret = i915_gem_object_unbind(obj);
289 if (ret != 0) {
290 WARN(ret != -ERESTARTSYS,
291 "failed to unbind object for tiling switch");
292 args->tiling_mode = obj_priv->tiling_mode;
293 mutex_unlock(&dev->struct_mutex);
294 drm_gem_object_unreference(obj);
295
296 return ret;
297 }
298 obj_priv->tiling_mode = args->tiling_mode;
299 }
300 obj_priv->stride = args->stride;
214 301
215 drm_gem_object_unreference(obj); 302 drm_gem_object_unreference(obj);
303 mutex_unlock(&dev->struct_mutex);
216 304
217 return 0; 305 return 0;
218} 306}
@@ -251,9 +339,8 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
251 DRM_ERROR("unknown tiling mode\n"); 339 DRM_ERROR("unknown tiling mode\n");
252 } 340 }
253 341
254 mutex_unlock(&dev->struct_mutex);
255
256 drm_gem_object_unreference(obj); 342 drm_gem_object_unreference(obj);
343 mutex_unlock(&dev->struct_mutex);
257 344
258 return 0; 345 return 0;
259} 346}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 6290219de6c8..87b6b603469e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -174,6 +174,19 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
174 return count; 174 return count;
175} 175}
176 176
177u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
178{
179 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
180 int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
181
182 if (!i915_pipe_enabled(dev, pipe)) {
183 DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
184 return 0;
185 }
186
187 return I915_READ(reg);
188}
189
177irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 190irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
178{ 191{
179 struct drm_device *dev = (struct drm_device *) arg; 192 struct drm_device *dev = (struct drm_device *) arg;
@@ -370,12 +383,13 @@ int i915_irq_emit(struct drm_device *dev, void *data,
370 drm_i915_irq_emit_t *emit = data; 383 drm_i915_irq_emit_t *emit = data;
371 int result; 384 int result;
372 385
373 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
374
375 if (!dev_priv) { 386 if (!dev_priv) {
376 DRM_ERROR("called with no initialization\n"); 387 DRM_ERROR("called with no initialization\n");
377 return -EINVAL; 388 return -EINVAL;
378 } 389 }
390
391 RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
392
379 mutex_lock(&dev->struct_mutex); 393 mutex_lock(&dev->struct_mutex);
380 result = i915_emit_irq(dev); 394 result = i915_emit_irq(dev);
381 mutex_unlock(&dev->struct_mutex); 395 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 273162579e1b..9d6539a868b3 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -186,12 +186,12 @@
186#define FENCE_REG_830_0 0x2000 186#define FENCE_REG_830_0 0x2000
187#define I830_FENCE_START_MASK 0x07f80000 187#define I830_FENCE_START_MASK 0x07f80000
188#define I830_FENCE_TILING_Y_SHIFT 12 188#define I830_FENCE_TILING_Y_SHIFT 12
189#define I830_FENCE_SIZE_BITS(size) ((get_order(size >> 19) - 1) << 8) 189#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
190#define I830_FENCE_PITCH_SHIFT 4 190#define I830_FENCE_PITCH_SHIFT 4
191#define I830_FENCE_REG_VALID (1<<0) 191#define I830_FENCE_REG_VALID (1<<0)
192 192
193#define I915_FENCE_START_MASK 0x0ff00000 193#define I915_FENCE_START_MASK 0x0ff00000
194#define I915_FENCE_SIZE_BITS(size) ((get_order(size >> 20) - 1) << 8) 194#define I915_FENCE_SIZE_BITS(size) ((ffs((size) >> 20) - 1) << 8)
195 195
196#define FENCE_REG_965_0 0x03000 196#define FENCE_REG_965_0 0x03000
197#define I965_FENCE_PITCH_SHIFT 2 197#define I965_FENCE_PITCH_SHIFT 2
@@ -1371,6 +1371,9 @@
1371#define PIPE_FRAME_LOW_SHIFT 24 1371#define PIPE_FRAME_LOW_SHIFT 24
1372#define PIPE_PIXEL_MASK 0x00ffffff 1372#define PIPE_PIXEL_MASK 0x00ffffff
1373#define PIPE_PIXEL_SHIFT 0 1373#define PIPE_PIXEL_SHIFT 0
1374/* GM45+ just has to be different */
1375#define PIPEA_FRMCOUNT_GM45 0x70040
1376#define PIPEA_FLIPCOUNT_GM45 0x70044
1374 1377
1375/* Cursor A & B regs */ 1378/* Cursor A & B regs */
1376#define CURACNTR 0x70080 1379#define CURACNTR 0x70080
@@ -1439,6 +1442,9 @@
1439#define PIPEBSTAT 0x71024 1442#define PIPEBSTAT 0x71024
1440#define PIPEBFRAMEHIGH 0x71040 1443#define PIPEBFRAMEHIGH 0x71040
1441#define PIPEBFRAMEPIXEL 0x71044 1444#define PIPEBFRAMEPIXEL 0x71044
1445#define PIPEB_FRMCOUNT_GM45 0x71040
1446#define PIPEB_FLIPCOUNT_GM45 0x71044
1447
1442 1448
1443/* Display B control */ 1449/* Display B control */
1444#define DSPBCNTR 0x71180 1450#define DSPBCNTR 0x71180
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 4ca82a025525..fc28e2bbd542 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -111,6 +111,12 @@ parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
111 panel_fixed_mode->clock = dvo_timing->clock * 10; 111 panel_fixed_mode->clock = dvo_timing->clock * 10;
112 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED; 112 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
113 113
114 /* Some VBTs have bogus h/vtotal values */
115 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
116 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
117 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
118 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
119
114 drm_mode_set_name(panel_fixed_mode); 120 drm_mode_set_name(panel_fixed_mode);
115 121
116 dev_priv->vbt_mode = panel_fixed_mode; 122 dev_priv->vbt_mode = panel_fixed_mode;
@@ -135,6 +141,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
135 if (general) { 141 if (general) {
136 dev_priv->int_tv_support = general->int_tv_support; 142 dev_priv->int_tv_support = general->int_tv_support;
137 dev_priv->int_crt_support = general->int_crt_support; 143 dev_priv->int_crt_support = general->int_crt_support;
144 dev_priv->lvds_use_ssc = general->enable_ssc;
145
146 if (dev_priv->lvds_use_ssc) {
147 if (IS_I855(dev_priv->dev))
148 dev_priv->lvds_ssc_freq = general->ssc_freq ? 66 : 48;
149 else
150 dev_priv->lvds_ssc_freq = general->ssc_freq ? 100 : 96;
151 }
138 } 152 }
139} 153}
140 154
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 31c3732b7a69..a2834276cb38 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -90,12 +90,12 @@ typedef struct {
90#define I9XX_DOT_MAX 400000 90#define I9XX_DOT_MAX 400000
91#define I9XX_VCO_MIN 1400000 91#define I9XX_VCO_MIN 1400000
92#define I9XX_VCO_MAX 2800000 92#define I9XX_VCO_MAX 2800000
93#define I9XX_N_MIN 3 93#define I9XX_N_MIN 1
94#define I9XX_N_MAX 8 94#define I9XX_N_MAX 6
95#define I9XX_M_MIN 70 95#define I9XX_M_MIN 70
96#define I9XX_M_MAX 120 96#define I9XX_M_MAX 120
97#define I9XX_M1_MIN 10 97#define I9XX_M1_MIN 10
98#define I9XX_M1_MAX 20 98#define I9XX_M1_MAX 22
99#define I9XX_M2_MIN 5 99#define I9XX_M2_MIN 5
100#define I9XX_M2_MAX 9 100#define I9XX_M2_MAX 9
101#define I9XX_P_SDVO_DAC_MIN 5 101#define I9XX_P_SDVO_DAC_MIN 5
@@ -189,9 +189,7 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
189 return limit; 189 return limit;
190} 190}
191 191
192/** Derive the pixel clock for the given refclk and divisors for 8xx chips. */ 192static void intel_clock(int refclk, intel_clock_t *clock)
193
194static void i8xx_clock(int refclk, intel_clock_t *clock)
195{ 193{
196 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2); 194 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
197 clock->p = clock->p1 * clock->p2; 195 clock->p = clock->p1 * clock->p2;
@@ -199,25 +197,6 @@ static void i8xx_clock(int refclk, intel_clock_t *clock)
199 clock->dot = clock->vco / clock->p; 197 clock->dot = clock->vco / clock->p;
200} 198}
201 199
202/** Derive the pixel clock for the given refclk and divisors for 9xx chips. */
203
204static void i9xx_clock(int refclk, intel_clock_t *clock)
205{
206 clock->m = 5 * (clock->m1 + 2) + (clock->m2 + 2);
207 clock->p = clock->p1 * clock->p2;
208 clock->vco = refclk * clock->m / (clock->n + 2);
209 clock->dot = clock->vco / clock->p;
210}
211
212static void intel_clock(struct drm_device *dev, int refclk,
213 intel_clock_t *clock)
214{
215 if (IS_I9XX(dev))
216 i9xx_clock (refclk, clock);
217 else
218 i8xx_clock (refclk, clock);
219}
220
221/** 200/**
222 * Returns whether any output on the specified pipe is of the specified type 201 * Returns whether any output on the specified pipe is of the specified type
223 */ 202 */
@@ -238,7 +217,7 @@ bool intel_pipe_has_type (struct drm_crtc *crtc, int type)
238 return false; 217 return false;
239} 218}
240 219
241#define INTELPllInvalid(s) { /* ErrorF (s) */; return false; } 220#define INTELPllInvalid(s) do { /* DRM_DEBUG(s); */ return false; } while (0)
242/** 221/**
243 * Returns whether the given set of divisors are valid for a given refclk with 222 * Returns whether the given set of divisors are valid for a given refclk with
244 * the given connectors. 223 * the given connectors.
@@ -318,7 +297,7 @@ static bool intel_find_best_PLL(struct drm_crtc *crtc, int target,
318 clock.p1 <= limit->p1.max; clock.p1++) { 297 clock.p1 <= limit->p1.max; clock.p1++) {
319 int this_err; 298 int this_err;
320 299
321 intel_clock(dev, refclk, &clock); 300 intel_clock(refclk, &clock);
322 301
323 if (!intel_PLL_is_valid(crtc, &clock)) 302 if (!intel_PLL_is_valid(crtc, &clock))
324 continue; 303 continue;
@@ -343,7 +322,7 @@ intel_wait_for_vblank(struct drm_device *dev)
343 udelay(20000); 322 udelay(20000);
344} 323}
345 324
346static void 325static int
347intel_pipe_set_base(struct drm_crtc *crtc, int x, int y, 326intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
348 struct drm_framebuffer *old_fb) 327 struct drm_framebuffer *old_fb)
349{ 328{
@@ -361,11 +340,21 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
361 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE; 340 int dspstride = (pipe == 0) ? DSPASTRIDE : DSPBSTRIDE;
362 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR; 341 int dspcntr_reg = (pipe == 0) ? DSPACNTR : DSPBCNTR;
363 u32 dspcntr, alignment; 342 u32 dspcntr, alignment;
343 int ret;
364 344
365 /* no fb bound */ 345 /* no fb bound */
366 if (!crtc->fb) { 346 if (!crtc->fb) {
367 DRM_DEBUG("No FB bound\n"); 347 DRM_DEBUG("No FB bound\n");
368 return; 348 return 0;
349 }
350
351 switch (pipe) {
352 case 0:
353 case 1:
354 break;
355 default:
356 DRM_ERROR("Can't update pipe %d in SAREA\n", pipe);
357 return -EINVAL;
369 } 358 }
370 359
371 intel_fb = to_intel_framebuffer(crtc->fb); 360 intel_fb = to_intel_framebuffer(crtc->fb);
@@ -377,28 +366,30 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
377 alignment = 64 * 1024; 366 alignment = 64 * 1024;
378 break; 367 break;
379 case I915_TILING_X: 368 case I915_TILING_X:
380 if (IS_I9XX(dev)) 369 /* pin() will align the object as required by fence */
381 alignment = 1024 * 1024; 370 alignment = 0;
382 else
383 alignment = 512 * 1024;
384 break; 371 break;
385 case I915_TILING_Y: 372 case I915_TILING_Y:
386 /* FIXME: Is this true? */ 373 /* FIXME: Is this true? */
387 DRM_ERROR("Y tiled not allowed for scan out buffers\n"); 374 DRM_ERROR("Y tiled not allowed for scan out buffers\n");
388 return; 375 return -EINVAL;
389 default: 376 default:
390 BUG(); 377 BUG();
391 } 378 }
392 379
393 if (i915_gem_object_pin(intel_fb->obj, alignment)) 380 mutex_lock(&dev->struct_mutex);
394 return; 381 ret = i915_gem_object_pin(intel_fb->obj, alignment);
395 382 if (ret != 0) {
396 i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1); 383 mutex_unlock(&dev->struct_mutex);
397 384 return ret;
398 Start = obj_priv->gtt_offset; 385 }
399 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
400 386
401 I915_WRITE(dspstride, crtc->fb->pitch); 387 ret = i915_gem_object_set_to_gtt_domain(intel_fb->obj, 1);
388 if (ret != 0) {
389 i915_gem_object_unpin(intel_fb->obj);
390 mutex_unlock(&dev->struct_mutex);
391 return ret;
392 }
402 393
403 dspcntr = I915_READ(dspcntr_reg); 394 dspcntr = I915_READ(dspcntr_reg);
404 /* Mask out pixel format bits in case we change it */ 395 /* Mask out pixel format bits in case we change it */
@@ -419,11 +410,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
419 break; 410 break;
420 default: 411 default:
421 DRM_ERROR("Unknown color depth\n"); 412 DRM_ERROR("Unknown color depth\n");
422 return; 413 i915_gem_object_unpin(intel_fb->obj);
414 mutex_unlock(&dev->struct_mutex);
415 return -EINVAL;
423 } 416 }
424 I915_WRITE(dspcntr_reg, dspcntr); 417 I915_WRITE(dspcntr_reg, dspcntr);
425 418
419 Start = obj_priv->gtt_offset;
420 Offset = y * crtc->fb->pitch + x * (crtc->fb->bits_per_pixel / 8);
421
426 DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y); 422 DRM_DEBUG("Writing base %08lX %08lX %d %d\n", Start, Offset, x, y);
423 I915_WRITE(dspstride, crtc->fb->pitch);
427 if (IS_I965G(dev)) { 424 if (IS_I965G(dev)) {
428 I915_WRITE(dspbase, Offset); 425 I915_WRITE(dspbase, Offset);
429 I915_READ(dspbase); 426 I915_READ(dspbase);
@@ -440,27 +437,24 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
440 intel_fb = to_intel_framebuffer(old_fb); 437 intel_fb = to_intel_framebuffer(old_fb);
441 i915_gem_object_unpin(intel_fb->obj); 438 i915_gem_object_unpin(intel_fb->obj);
442 } 439 }
440 mutex_unlock(&dev->struct_mutex);
443 441
444 if (!dev->primary->master) 442 if (!dev->primary->master)
445 return; 443 return 0;
446 444
447 master_priv = dev->primary->master->driver_priv; 445 master_priv = dev->primary->master->driver_priv;
448 if (!master_priv->sarea_priv) 446 if (!master_priv->sarea_priv)
449 return; 447 return 0;
450 448
451 switch (pipe) { 449 if (pipe) {
452 case 0:
453 master_priv->sarea_priv->pipeA_x = x;
454 master_priv->sarea_priv->pipeA_y = y;
455 break;
456 case 1:
457 master_priv->sarea_priv->pipeB_x = x; 450 master_priv->sarea_priv->pipeB_x = x;
458 master_priv->sarea_priv->pipeB_y = y; 451 master_priv->sarea_priv->pipeB_y = y;
459 break; 452 } else {
460 default: 453 master_priv->sarea_priv->pipeA_x = x;
461 DRM_ERROR("Can't update pipe %d in SAREA\n", pipe); 454 master_priv->sarea_priv->pipeA_y = y;
462 break;
463 } 455 }
456
457 return 0;
464} 458}
465 459
466 460
@@ -708,11 +702,11 @@ static int intel_panel_fitter_pipe (struct drm_device *dev)
708 return 1; 702 return 1;
709} 703}
710 704
711static void intel_crtc_mode_set(struct drm_crtc *crtc, 705static int intel_crtc_mode_set(struct drm_crtc *crtc,
712 struct drm_display_mode *mode, 706 struct drm_display_mode *mode,
713 struct drm_display_mode *adjusted_mode, 707 struct drm_display_mode *adjusted_mode,
714 int x, int y, 708 int x, int y,
715 struct drm_framebuffer *old_fb) 709 struct drm_framebuffer *old_fb)
716{ 710{
717 struct drm_device *dev = crtc->dev; 711 struct drm_device *dev = crtc->dev;
718 struct drm_i915_private *dev_priv = dev->dev_private; 712 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -732,13 +726,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
732 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE; 726 int dspsize_reg = (pipe == 0) ? DSPASIZE : DSPBSIZE;
733 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS; 727 int dsppos_reg = (pipe == 0) ? DSPAPOS : DSPBPOS;
734 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC; 728 int pipesrc_reg = (pipe == 0) ? PIPEASRC : PIPEBSRC;
735 int refclk; 729 int refclk, num_outputs = 0;
736 intel_clock_t clock; 730 intel_clock_t clock;
737 u32 dpll = 0, fp = 0, dspcntr, pipeconf; 731 u32 dpll = 0, fp = 0, dspcntr, pipeconf;
738 bool ok, is_sdvo = false, is_dvo = false; 732 bool ok, is_sdvo = false, is_dvo = false;
739 bool is_crt = false, is_lvds = false, is_tv = false; 733 bool is_crt = false, is_lvds = false, is_tv = false;
740 struct drm_mode_config *mode_config = &dev->mode_config; 734 struct drm_mode_config *mode_config = &dev->mode_config;
741 struct drm_connector *connector; 735 struct drm_connector *connector;
736 int ret;
742 737
743 drm_vblank_pre_modeset(dev, pipe); 738 drm_vblank_pre_modeset(dev, pipe);
744 739
@@ -755,6 +750,8 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
755 case INTEL_OUTPUT_SDVO: 750 case INTEL_OUTPUT_SDVO:
756 case INTEL_OUTPUT_HDMI: 751 case INTEL_OUTPUT_HDMI:
757 is_sdvo = true; 752 is_sdvo = true;
753 if (intel_output->needs_tv_clock)
754 is_tv = true;
758 break; 755 break;
759 case INTEL_OUTPUT_DVO: 756 case INTEL_OUTPUT_DVO:
760 is_dvo = true; 757 is_dvo = true;
@@ -766,9 +763,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
766 is_crt = true; 763 is_crt = true;
767 break; 764 break;
768 } 765 }
766
767 num_outputs++;
769 } 768 }
770 769
771 if (IS_I9XX(dev)) { 770 if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2) {
771 refclk = dev_priv->lvds_ssc_freq * 1000;
772 DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
773 } else if (IS_I9XX(dev)) {
772 refclk = 96000; 774 refclk = 96000;
773 } else { 775 } else {
774 refclk = 48000; 776 refclk = 48000;
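
A sketch of the reference-clock choice this hunk adds, written as a pure function so the rule is visible in isolation; the parameter names are stand-ins, the values mirror the hunk (SSC frequency when driving a single LVDS output, otherwise the fixed 96 MHz 9xx or 48 MHz 8xx reference), and the result is in kHz:

static int example_pick_refclk_khz(int is_lvds, int lvds_use_ssc,
				   int num_outputs, int lvds_ssc_freq_mhz,
				   int is_i9xx)
{
	if (is_lvds && lvds_use_ssc && num_outputs < 2)
		return lvds_ssc_freq_mhz * 1000;	/* VBT spread-spectrum clock */
	return is_i9xx ? 96000 : 48000;			/* fixed reference */
}
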
@@ -777,7 +779,7 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
777 ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock); 779 ok = intel_find_best_PLL(crtc, adjusted_mode->clock, refclk, &clock);
778 if (!ok) { 780 if (!ok) {
779 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 781 DRM_ERROR("Couldn't find PLL settings for mode!\n");
780 return; 782 return -EINVAL;
781 } 783 }
782 784
783 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 785 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
@@ -827,11 +829,14 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
827 } 829 }
828 } 830 }
829 831
830 if (is_tv) { 832 if (is_sdvo && is_tv)
833 dpll |= PLL_REF_INPUT_TVCLKINBC;
834 else if (is_tv)
831 /* XXX: just matching BIOS for now */ 835 /* XXX: just matching BIOS for now */
832/* dpll |= PLL_REF_INPUT_TVCLKINBC; */ 836 /* dpll |= PLL_REF_INPUT_TVCLKINBC; */
833 dpll |= 3; 837 dpll |= 3;
834 } 838 else if (is_lvds && dev_priv->lvds_use_ssc && num_outputs < 2)
839 dpll |= PLLB_REF_INPUT_SPREADSPECTRUMIN;
835 else 840 else
836 dpll |= PLL_REF_INPUT_DREFCLK; 841 dpll |= PLL_REF_INPUT_DREFCLK;
837 842
@@ -948,9 +953,13 @@ static void intel_crtc_mode_set(struct drm_crtc *crtc,
948 I915_WRITE(dspcntr_reg, dspcntr); 953 I915_WRITE(dspcntr_reg, dspcntr);
949 954
950 /* Flush the plane changes */ 955 /* Flush the plane changes */
951 intel_pipe_set_base(crtc, x, y, old_fb); 956 ret = intel_pipe_set_base(crtc, x, y, old_fb);
957 if (ret != 0)
958 return ret;
952 959
953 drm_vblank_post_modeset(dev, pipe); 960 drm_vblank_post_modeset(dev, pipe);
961
962 return 0;
954} 963}
955 964
956/** Loads the palette/gamma unit for the CRTC with the prepared values */ 965/** Loads the palette/gamma unit for the CRTC with the prepared values */
@@ -999,6 +1008,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
999 temp = CURSOR_MODE_DISABLE; 1008 temp = CURSOR_MODE_DISABLE;
1000 addr = 0; 1009 addr = 0;
1001 bo = NULL; 1010 bo = NULL;
1011 mutex_lock(&dev->struct_mutex);
1002 goto finish; 1012 goto finish;
1003 } 1013 }
1004 1014
@@ -1021,18 +1031,19 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1021 } 1031 }
1022 1032
1023 /* we only need to pin inside GTT if cursor is non-phy */ 1033 /* we only need to pin inside GTT if cursor is non-phy */
1034 mutex_lock(&dev->struct_mutex);
1024 if (!dev_priv->cursor_needs_physical) { 1035 if (!dev_priv->cursor_needs_physical) {
1025 ret = i915_gem_object_pin(bo, PAGE_SIZE); 1036 ret = i915_gem_object_pin(bo, PAGE_SIZE);
1026 if (ret) { 1037 if (ret) {
1027 DRM_ERROR("failed to pin cursor bo\n"); 1038 DRM_ERROR("failed to pin cursor bo\n");
1028 goto fail; 1039 goto fail_locked;
1029 } 1040 }
1030 addr = obj_priv->gtt_offset; 1041 addr = obj_priv->gtt_offset;
1031 } else { 1042 } else {
1032 ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1); 1043 ret = i915_gem_attach_phys_object(dev, bo, (pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1);
1033 if (ret) { 1044 if (ret) {
1034 DRM_ERROR("failed to attach phys object\n"); 1045 DRM_ERROR("failed to attach phys object\n");
1035 goto fail; 1046 goto fail_locked;
1036 } 1047 }
1037 addr = obj_priv->phys_obj->handle->busaddr; 1048 addr = obj_priv->phys_obj->handle->busaddr;
1038 } 1049 }
@@ -1052,10 +1063,9 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1052 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo); 1063 i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
1053 } else 1064 } else
1054 i915_gem_object_unpin(intel_crtc->cursor_bo); 1065 i915_gem_object_unpin(intel_crtc->cursor_bo);
1055 mutex_lock(&dev->struct_mutex);
1056 drm_gem_object_unreference(intel_crtc->cursor_bo); 1066 drm_gem_object_unreference(intel_crtc->cursor_bo);
1057 mutex_unlock(&dev->struct_mutex);
1058 } 1067 }
1068 mutex_unlock(&dev->struct_mutex);
1059 1069
1060 intel_crtc->cursor_addr = addr; 1070 intel_crtc->cursor_addr = addr;
1061 intel_crtc->cursor_bo = bo; 1071 intel_crtc->cursor_bo = bo;
@@ -1063,6 +1073,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1063 return 0; 1073 return 0;
1064fail: 1074fail:
1065 mutex_lock(&dev->struct_mutex); 1075 mutex_lock(&dev->struct_mutex);
1076fail_locked:
1066 drm_gem_object_unreference(bo); 1077 drm_gem_object_unreference(bo);
1067 mutex_unlock(&dev->struct_mutex); 1078 mutex_unlock(&dev->struct_mutex);
1068 return ret; 1079 return ret;
@@ -1290,7 +1301,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1290 } 1301 }
1291 1302
1292 /* XXX: Handle the 100Mhz refclk */ 1303 /* XXX: Handle the 100Mhz refclk */
1293 i9xx_clock(96000, &clock); 1304 intel_clock(96000, &clock);
1294 } else { 1305 } else {
1295 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN); 1306 bool is_lvds = (pipe == 1) && (I915_READ(LVDS) & LVDS_PORT_EN);
1296 1307
@@ -1302,9 +1313,9 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1302 if ((dpll & PLL_REF_INPUT_MASK) == 1313 if ((dpll & PLL_REF_INPUT_MASK) ==
1303 PLLB_REF_INPUT_SPREADSPECTRUMIN) { 1314 PLLB_REF_INPUT_SPREADSPECTRUMIN) {
1304 /* XXX: might not be 66MHz */ 1315 /* XXX: might not be 66MHz */
1305 i8xx_clock(66000, &clock); 1316 intel_clock(66000, &clock);
1306 } else 1317 } else
1307 i8xx_clock(48000, &clock); 1318 intel_clock(48000, &clock);
1308 } else { 1319 } else {
1309 if (dpll & PLL_P1_DIVIDE_BY_TWO) 1320 if (dpll & PLL_P1_DIVIDE_BY_TWO)
1310 clock.p1 = 2; 1321 clock.p1 = 2;
@@ -1317,7 +1328,7 @@ static int intel_crtc_clock_get(struct drm_device *dev, struct drm_crtc *crtc)
1317 else 1328 else
1318 clock.p2 = 2; 1329 clock.p2 = 2;
1319 1330
1320 i8xx_clock(48000, &clock); 1331 intel_clock(48000, &clock);
1321 } 1332 }
1322 } 1333 }
1323 1334
@@ -1452,6 +1463,7 @@ static int intel_connector_clones(struct drm_device *dev, int type_mask)
1452 1463
1453static void intel_setup_outputs(struct drm_device *dev) 1464static void intel_setup_outputs(struct drm_device *dev)
1454{ 1465{
1466 struct drm_i915_private *dev_priv = dev->dev_private;
1455 struct drm_connector *connector; 1467 struct drm_connector *connector;
1456 1468
1457 intel_crt_init(dev); 1469 intel_crt_init(dev);
@@ -1463,13 +1475,16 @@ static void intel_setup_outputs(struct drm_device *dev)
1463 if (IS_I9XX(dev)) { 1475 if (IS_I9XX(dev)) {
1464 int found; 1476 int found;
1465 1477
1466 found = intel_sdvo_init(dev, SDVOB); 1478 if (I915_READ(SDVOB) & SDVO_DETECTED) {
1467 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1479 found = intel_sdvo_init(dev, SDVOB);
1468 intel_hdmi_init(dev, SDVOB); 1480 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1469 1481 intel_hdmi_init(dev, SDVOB);
1470 found = intel_sdvo_init(dev, SDVOC); 1482 }
1471 if (!found && SUPPORTS_INTEGRATED_HDMI(dev)) 1483 if (!IS_G4X(dev) || (I915_READ(SDVOB) & SDVO_DETECTED)) {
1472 intel_hdmi_init(dev, SDVOC); 1484 found = intel_sdvo_init(dev, SDVOC);
1485 if (!found && SUPPORTS_INTEGRATED_HDMI(dev))
1486 intel_hdmi_init(dev, SDVOC);
1487 }
1473 } else 1488 } else
1474 intel_dvo_init(dev); 1489 intel_dvo_init(dev);
1475 1490
@@ -1592,7 +1607,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
1592 1607
1593 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj); 1608 ret = intel_framebuffer_create(dev, mode_cmd, &fb, obj);
1594 if (ret) { 1609 if (ret) {
1610 mutex_lock(&dev->struct_mutex);
1595 drm_gem_object_unreference(obj); 1611 drm_gem_object_unreference(obj);
1612 mutex_unlock(&dev->struct_mutex);
1596 return NULL; 1613 return NULL;
1597 } 1614 }
1598 1615
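
The intel_setup_outputs() hunk above gates SDVO/HDMI probing on the SDVO_DETECTED strap bit. A minimal sketch of that decision, taking the register value as a parameter (names are stand-ins; per the hunk, the SDVOC decision also keys off the SDVOB register on G4X):

/* Probe SDVOB only when the strap reports a device present. */
static int example_should_probe_sdvob(unsigned int sdvob_reg, unsigned int detected_bit)
{
	return (sdvob_reg & detected_bit) != 0;
}

/* Probe SDVOC unconditionally on pre-G4X; on G4X, only when SDVOB reports detected. */
static int example_should_probe_sdvoc(unsigned int sdvob_reg, unsigned int detected_bit,
				      int is_g4x)
{
	return !is_g4x || (sdvob_reg & detected_bit);
}
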
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 8a4cc50c5b4e..957daef8edff 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -82,6 +82,7 @@ struct intel_output {
82 struct intel_i2c_chan *i2c_bus; /* for control functions */ 82 struct intel_i2c_chan *i2c_bus; /* for control functions */
83 struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */ 83 struct intel_i2c_chan *ddc_bus; /* for DDC only stuff */
84 bool load_detect_temp; 84 bool load_detect_temp;
85 bool needs_tv_clock;
85 void *dev_priv; 86 void *dev_priv;
86}; 87};
87 88
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index afd1217b8a02..b7f0ebe9f810 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -473,7 +473,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
473 ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo); 473 ret = intel_framebuffer_create(dev, &mode_cmd, &fb, fbo);
474 if (ret) { 474 if (ret) {
475 DRM_ERROR("failed to allocate fb.\n"); 475 DRM_ERROR("failed to allocate fb.\n");
476 goto out_unref; 476 goto out_unpin;
477 } 477 }
478 478
479 list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list); 479 list_add(&fb->filp_head, &dev->mode_config.fb_kernel_list);
@@ -484,7 +484,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
484 info = framebuffer_alloc(sizeof(struct intelfb_par), device); 484 info = framebuffer_alloc(sizeof(struct intelfb_par), device);
485 if (!info) { 485 if (!info) {
486 ret = -ENOMEM; 486 ret = -ENOMEM;
487 goto out_unref; 487 goto out_unpin;
488 } 488 }
489 489
490 par = info->par; 490 par = info->par;
@@ -513,7 +513,7 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
513 size); 513 size);
514 if (!info->screen_base) { 514 if (!info->screen_base) {
515 ret = -ENOSPC; 515 ret = -ENOSPC;
516 goto out_unref; 516 goto out_unpin;
517 } 517 }
518 info->screen_size = size; 518 info->screen_size = size;
519 519
@@ -608,6 +608,8 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
608 mutex_unlock(&dev->struct_mutex); 608 mutex_unlock(&dev->struct_mutex);
609 return 0; 609 return 0;
610 610
611out_unpin:
612 i915_gem_object_unpin(fbo);
611out_unref: 613out_unref:
612 drm_gem_object_unreference(fbo); 614 drm_gem_object_unreference(fbo);
613 mutex_unlock(&dev->struct_mutex); 615 mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index b36a5214d8df..0d211af98854 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -27,6 +27,7 @@
27 * Jesse Barnes <jesse.barnes@intel.com> 27 * Jesse Barnes <jesse.barnes@intel.com>
28 */ 28 */
29 29
30#include <linux/dmi.h>
30#include <linux/i2c.h> 31#include <linux/i2c.h>
31#include "drmP.h" 32#include "drmP.h"
32#include "drm.h" 33#include "drm.h"
@@ -311,10 +312,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
311 if (dev_priv->panel_fixed_mode != NULL) { 312 if (dev_priv->panel_fixed_mode != NULL) {
312 struct drm_display_mode *mode; 313 struct drm_display_mode *mode;
313 314
314 mutex_lock(&dev->mode_config.mutex);
315 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode); 315 mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
316 drm_mode_probed_add(connector, mode); 316 drm_mode_probed_add(connector, mode);
317 mutex_unlock(&dev->mode_config.mutex);
318 317
319 return 1; 318 return 1;
320 } 319 }
@@ -405,6 +404,16 @@ void intel_lvds_init(struct drm_device *dev)
405 u32 lvds; 404 u32 lvds;
406 int pipe; 405 int pipe;
407 406
407 /* Blacklist machines that we know falsely report LVDS. */
408 /* FIXME: add a check for the Aopen Mini PC */
409
410 /* Apple Mac Mini Core Duo and Mac Mini Core 2 Duo */
411 if(dmi_match(DMI_PRODUCT_NAME, "Macmini1,1") ||
412 dmi_match(DMI_PRODUCT_NAME, "Macmini2,1")) {
413 DRM_DEBUG("Skipping LVDS initialization for Apple Mac Mini\n");
414 return;
415 }
416
408 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 417 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
409 if (!intel_output) { 418 if (!intel_output) {
410 return; 419 return;
@@ -458,7 +467,7 @@ void intel_lvds_init(struct drm_device *dev)
458 dev_priv->panel_fixed_mode = 467 dev_priv->panel_fixed_mode =
459 drm_mode_duplicate(dev, scan); 468 drm_mode_duplicate(dev, scan);
460 mutex_unlock(&dev->mode_config.mutex); 469 mutex_unlock(&dev->mode_config.mutex);
461 goto out; /* FIXME: check for quirks */ 470 goto out;
462 } 471 }
463 mutex_unlock(&dev->mode_config.mutex); 472 mutex_unlock(&dev->mode_config.mutex);
464 } 473 }
@@ -472,8 +481,6 @@ void intel_lvds_init(struct drm_device *dev)
472 if (dev_priv->panel_fixed_mode) { 481 if (dev_priv->panel_fixed_mode) {
473 dev_priv->panel_fixed_mode->type |= 482 dev_priv->panel_fixed_mode->type |=
474 DRM_MODE_TYPE_PREFERRED; 483 DRM_MODE_TYPE_PREFERRED;
475 drm_mode_probed_add(connector,
476 dev_priv->panel_fixed_mode);
477 goto out; 484 goto out;
478 } 485 }
479 } 486 }
@@ -492,7 +499,7 @@ void intel_lvds_init(struct drm_device *dev)
492 if (dev_priv->panel_fixed_mode) { 499 if (dev_priv->panel_fixed_mode) {
493 dev_priv->panel_fixed_mode->type |= 500 dev_priv->panel_fixed_mode->type |=
494 DRM_MODE_TYPE_PREFERRED; 501 DRM_MODE_TYPE_PREFERRED;
495 goto out; /* FIXME: check for quirks */ 502 goto out;
496 } 503 }
497 } 504 }
498 505
@@ -500,38 +507,6 @@ void intel_lvds_init(struct drm_device *dev)
500 if (!dev_priv->panel_fixed_mode) 507 if (!dev_priv->panel_fixed_mode)
501 goto failed; 508 goto failed;
502 509
503 /* FIXME: detect aopen & mac mini type stuff automatically? */
504 /*
505 * Blacklist machines with BIOSes that list an LVDS panel without
506 * actually having one.
507 */
508 if (IS_I945GM(dev)) {
509 /* aopen mini pc */
510 if (dev->pdev->subsystem_vendor == 0xa0a0)
511 goto failed;
512
513 if ((dev->pdev->subsystem_vendor == 0x8086) &&
514 (dev->pdev->subsystem_device == 0x7270)) {
515 /* It's a Mac Mini or Macbook Pro.
516 *
517 * Apple hardware is out to get us. The macbook pro
518 * has a real LVDS panel, but the mac mini does not,
519 * and they have the same device IDs. We'll
520 * distinguish by panel size, on the assumption
521 * that Apple isn't about to make any machines with an
522 * 800x600 display.
523 */
524
525 if (dev_priv->panel_fixed_mode != NULL &&
526 dev_priv->panel_fixed_mode->hdisplay == 800 &&
527 dev_priv->panel_fixed_mode->vdisplay == 600) {
528 DRM_DEBUG("Suspected Mac Mini, ignoring the LVDS\n");
529 goto failed;
530 }
531 }
532 }
533
534
535out: 510out:
536 drm_sysfs_connector_add(connector); 511 drm_sysfs_connector_add(connector);
537 return; 512 return;
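
The intel_lvds.c hunks above replace the PCI-subsystem heuristics with a DMI product-name blacklist. A small sketch of that check with the product string passed in (the patch itself uses dmi_match(DMI_PRODUCT_NAME, ...); the helper name here is a stand-in):

#include <string.h>

static int example_lvds_is_blacklisted(const char *product_name)
{
	/* Machines whose BIOS falsely reports an LVDS panel. */
	static const char *const blacklist[] = { "Macmini1,1", "Macmini2,1" };
	size_t i;

	for (i = 0; i < sizeof(blacklist) / sizeof(blacklist[0]); i++)
		if (strcmp(product_name, blacklist[i]) == 0)
			return 1;
	return 0;
}
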
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 407215469102..fbe6f3931b1b 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -40,13 +40,59 @@
40struct intel_sdvo_priv { 40struct intel_sdvo_priv {
41 struct intel_i2c_chan *i2c_bus; 41 struct intel_i2c_chan *i2c_bus;
42 int slaveaddr; 42 int slaveaddr;
43
44 /* Register for the SDVO device: SDVOB or SDVOC */
43 int output_device; 45 int output_device;
44 46
45 u16 active_outputs; 47 /* Active outputs controlled by this SDVO output */
48 uint16_t controlled_output;
46 49
50 /*
51 * Capabilities of the SDVO device returned by
52 * i830_sdvo_get_capabilities()
53 */
47 struct intel_sdvo_caps caps; 54 struct intel_sdvo_caps caps;
55
56 /* Pixel clock limitations reported by the SDVO device, in kHz */
48 int pixel_clock_min, pixel_clock_max; 57 int pixel_clock_min, pixel_clock_max;
49 58
59 /**
60 * This is set if we're going to treat the device as TV-out.
61 *
62 * While we have these nice friendly flags for output types that ought
63 * to decide this for us, the S-Video output on our HDMI+S-Video card
64 * shows up as RGB1 (VGA).
65 */
66 bool is_tv;
67
68 /**
69 * This is set if we treat the device as HDMI, instead of DVI.
70 */
71 bool is_hdmi;
72
73 /**
74 * Returned SDTV resolutions allowed for the current format, if the
75 * device reported it.
76 */
77 struct intel_sdvo_sdtv_resolution_reply sdtv_resolutions;
78
79 /**
80 * Current selected TV format.
81 *
82 * This is stored in the same structure that's passed to the device, for
83 * convenience.
84 */
85 struct intel_sdvo_tv_format tv_format;
86
87 /*
88 * supported encoding mode, used to determine whether HDMI is
89 * supported
90 */
91 struct intel_sdvo_encode encode;
92
93 /* DDC bus used by this SDVO output */
94 uint8_t ddc_bus;
95
50 int save_sdvo_mult; 96 int save_sdvo_mult;
51 u16 save_active_outputs; 97 u16 save_active_outputs;
52 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2; 98 struct intel_sdvo_dtd save_input_dtd_1, save_input_dtd_2;
@@ -147,9 +193,9 @@ static bool intel_sdvo_write_byte(struct intel_output *intel_output, int addr,
147 193
148#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd} 194#define SDVO_CMD_NAME_ENTRY(cmd) {cmd, #cmd}
149/** Mapping of command numbers to names, for debug output */ 195/** Mapping of command numbers to names, for debug output */
150const static struct _sdvo_cmd_name { 196static const struct _sdvo_cmd_name {
151 u8 cmd; 197 u8 cmd;
152 char *name; 198 char *name;
153} sdvo_cmd_names[] = { 199} sdvo_cmd_names[] = {
154 SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET), 200 SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
155 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS), 201 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
@@ -186,8 +232,35 @@ const static struct _sdvo_cmd_name {
186 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS), 232 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
187 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT), 233 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
188 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT), 234 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
189 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_RESOLUTION_SUPPORT), 235 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
236 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
237 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
238 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
190 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH), 239 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
240 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
241 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
242 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
243 /* HDMI op code */
244 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
245 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
246 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
247 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
248 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
249 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
250 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
251 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
252 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
253 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
254 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
255 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
256 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
257 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
258 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
259 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
260 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
261 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
262 SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
263 SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
191}; 264};
192 265
193#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC") 266#define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
@@ -506,6 +579,50 @@ static bool intel_sdvo_set_output_timing(struct intel_output *intel_output,
506 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd); 579 SDVO_CMD_SET_OUTPUT_TIMINGS_PART1, dtd);
507} 580}
508 581
582static bool
583intel_sdvo_create_preferred_input_timing(struct intel_output *output,
584 uint16_t clock,
585 uint16_t width,
586 uint16_t height)
587{
588 struct intel_sdvo_preferred_input_timing_args args;
589 uint8_t status;
590
591 args.clock = clock;
592 args.width = width;
593 args.height = height;
594 intel_sdvo_write_cmd(output, SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING,
595 &args, sizeof(args));
596 status = intel_sdvo_read_response(output, NULL, 0);
597 if (status != SDVO_CMD_STATUS_SUCCESS)
598 return false;
599
600 return true;
601}
602
603static bool intel_sdvo_get_preferred_input_timing(struct intel_output *output,
604 struct intel_sdvo_dtd *dtd)
605{
606 	uint8_t status;
607
608 intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1,
609 NULL, 0);
610
611 status = intel_sdvo_read_response(output, &dtd->part1,
612 sizeof(dtd->part1));
613 if (status != SDVO_CMD_STATUS_SUCCESS)
614 return false;
615
616 intel_sdvo_write_cmd(output, SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2,
617 NULL, 0);
618
619 status = intel_sdvo_read_response(output, &dtd->part2,
620 sizeof(dtd->part2));
621 if (status != SDVO_CMD_STATUS_SUCCESS)
622 return false;
623
624 	return true;
625}
509 626
510static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output) 627static int intel_sdvo_get_clock_rate_mult(struct intel_output *intel_output)
511{ 628{
@@ -536,36 +653,12 @@ static bool intel_sdvo_set_clock_rate_mult(struct intel_output *intel_output, u8
536 return true; 653 return true;
537} 654}
538 655
539static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder, 656static void intel_sdvo_get_dtd_from_mode(struct intel_sdvo_dtd *dtd,
540 struct drm_display_mode *mode, 657 struct drm_display_mode *mode)
541 struct drm_display_mode *adjusted_mode)
542{
543 /* Make the CRTC code factor in the SDVO pixel multiplier. The SDVO
544 * device will be told of the multiplier during mode_set.
545 */
546 adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
547 return true;
548}
549
550static void intel_sdvo_mode_set(struct drm_encoder *encoder,
551 struct drm_display_mode *mode,
552 struct drm_display_mode *adjusted_mode)
553{ 658{
554 struct drm_device *dev = encoder->dev; 659 uint16_t width, height;
555 struct drm_i915_private *dev_priv = dev->dev_private; 660 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
556 struct drm_crtc *crtc = encoder->crtc; 661 uint16_t h_sync_offset, v_sync_offset;
557 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
558 struct intel_output *intel_output = enc_to_intel_output(encoder);
559 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
560 u16 width, height;
561 u16 h_blank_len, h_sync_len, v_blank_len, v_sync_len;
562 u16 h_sync_offset, v_sync_offset;
563 u32 sdvox;
564 struct intel_sdvo_dtd output_dtd;
565 int sdvo_pixel_multiply;
566
567 if (!mode)
568 return;
569 662
570 width = mode->crtc_hdisplay; 663 width = mode->crtc_hdisplay;
571 height = mode->crtc_vdisplay; 664 height = mode->crtc_vdisplay;
@@ -580,93 +673,423 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
580 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start; 673 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
581 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start; 674 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
582 675
583 output_dtd.part1.clock = mode->clock / 10; 676 dtd->part1.clock = mode->clock / 10;
584 output_dtd.part1.h_active = width & 0xff; 677 dtd->part1.h_active = width & 0xff;
585 output_dtd.part1.h_blank = h_blank_len & 0xff; 678 dtd->part1.h_blank = h_blank_len & 0xff;
586 output_dtd.part1.h_high = (((width >> 8) & 0xf) << 4) | 679 dtd->part1.h_high = (((width >> 8) & 0xf) << 4) |
587 ((h_blank_len >> 8) & 0xf); 680 ((h_blank_len >> 8) & 0xf);
588 output_dtd.part1.v_active = height & 0xff; 681 dtd->part1.v_active = height & 0xff;
589 output_dtd.part1.v_blank = v_blank_len & 0xff; 682 dtd->part1.v_blank = v_blank_len & 0xff;
590 output_dtd.part1.v_high = (((height >> 8) & 0xf) << 4) | 683 dtd->part1.v_high = (((height >> 8) & 0xf) << 4) |
591 ((v_blank_len >> 8) & 0xf); 684 ((v_blank_len >> 8) & 0xf);
592 685
593 output_dtd.part2.h_sync_off = h_sync_offset; 686 dtd->part2.h_sync_off = h_sync_offset;
594 output_dtd.part2.h_sync_width = h_sync_len & 0xff; 687 dtd->part2.h_sync_width = h_sync_len & 0xff;
595 output_dtd.part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 | 688 dtd->part2.v_sync_off_width = (v_sync_offset & 0xf) << 4 |
596 (v_sync_len & 0xf); 689 (v_sync_len & 0xf);
597 output_dtd.part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) | 690 dtd->part2.sync_off_width_high = ((h_sync_offset & 0x300) >> 2) |
598 ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) | 691 ((h_sync_len & 0x300) >> 4) | ((v_sync_offset & 0x30) >> 2) |
599 ((v_sync_len & 0x30) >> 4); 692 ((v_sync_len & 0x30) >> 4);
600 693
601 output_dtd.part2.dtd_flags = 0x18; 694 dtd->part2.dtd_flags = 0x18;
602 if (mode->flags & DRM_MODE_FLAG_PHSYNC) 695 if (mode->flags & DRM_MODE_FLAG_PHSYNC)
603 output_dtd.part2.dtd_flags |= 0x2; 696 dtd->part2.dtd_flags |= 0x2;
604 if (mode->flags & DRM_MODE_FLAG_PVSYNC) 697 if (mode->flags & DRM_MODE_FLAG_PVSYNC)
605 output_dtd.part2.dtd_flags |= 0x4; 698 dtd->part2.dtd_flags |= 0x4;
699
700 dtd->part2.sdvo_flags = 0;
701 dtd->part2.v_sync_off_high = v_sync_offset & 0xc0;
702 dtd->part2.reserved = 0;
703}
704
705static void intel_sdvo_get_mode_from_dtd(struct drm_display_mode * mode,
706 struct intel_sdvo_dtd *dtd)
707{
708 uint16_t width, height;
709 uint16_t h_blank_len, h_sync_len, v_blank_len, v_sync_len;
710 uint16_t h_sync_offset, v_sync_offset;
711
712 width = mode->crtc_hdisplay;
713 height = mode->crtc_vdisplay;
714
715 /* do some mode translations */
716 h_blank_len = mode->crtc_hblank_end - mode->crtc_hblank_start;
717 h_sync_len = mode->crtc_hsync_end - mode->crtc_hsync_start;
718
719 v_blank_len = mode->crtc_vblank_end - mode->crtc_vblank_start;
720 v_sync_len = mode->crtc_vsync_end - mode->crtc_vsync_start;
721
722 h_sync_offset = mode->crtc_hsync_start - mode->crtc_hblank_start;
723 v_sync_offset = mode->crtc_vsync_start - mode->crtc_vblank_start;
724
725 mode->hdisplay = dtd->part1.h_active;
726 mode->hdisplay += ((dtd->part1.h_high >> 4) & 0x0f) << 8;
727 mode->hsync_start = mode->hdisplay + dtd->part2.h_sync_off;
728 	mode->hsync_start += (dtd->part2.sync_off_width_high & 0xc0) << 2;
729 mode->hsync_end = mode->hsync_start + dtd->part2.h_sync_width;
730 mode->hsync_end += (dtd->part2.sync_off_width_high & 0x30) << 4;
731 mode->htotal = mode->hdisplay + dtd->part1.h_blank;
732 mode->htotal += (dtd->part1.h_high & 0xf) << 8;
733
734 mode->vdisplay = dtd->part1.v_active;
735 mode->vdisplay += ((dtd->part1.v_high >> 4) & 0x0f) << 8;
736 mode->vsync_start = mode->vdisplay;
737 mode->vsync_start += (dtd->part2.v_sync_off_width >> 4) & 0xf;
738 	mode->vsync_start += (dtd->part2.sync_off_width_high & 0x0c) << 2;
739 mode->vsync_start += dtd->part2.v_sync_off_high & 0xc0;
740 mode->vsync_end = mode->vsync_start +
741 (dtd->part2.v_sync_off_width & 0xf);
742 mode->vsync_end += (dtd->part2.sync_off_width_high & 0x3) << 4;
743 mode->vtotal = mode->vdisplay + dtd->part1.v_blank;
744 mode->vtotal += (dtd->part1.v_high & 0xf) << 8;
745
746 mode->clock = dtd->part1.clock * 10;
747
748 mode->flags &= (DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC);
749 if (dtd->part2.dtd_flags & 0x2)
750 mode->flags |= DRM_MODE_FLAG_PHSYNC;
751 if (dtd->part2.dtd_flags & 0x4)
752 mode->flags |= DRM_MODE_FLAG_PVSYNC;
753}
754
755static bool intel_sdvo_get_supp_encode(struct intel_output *output,
756 struct intel_sdvo_encode *encode)
757{
758 uint8_t status;
759
760 intel_sdvo_write_cmd(output, SDVO_CMD_GET_SUPP_ENCODE, NULL, 0);
761 status = intel_sdvo_read_response(output, encode, sizeof(*encode));
762 if (status != SDVO_CMD_STATUS_SUCCESS) { /* non-support means DVI */
763 memset(encode, 0, sizeof(*encode));
764 return false;
765 }
766
767 return true;
768}
769
770static bool intel_sdvo_set_encode(struct intel_output *output, uint8_t mode)
771{
772 uint8_t status;
773
774 intel_sdvo_write_cmd(output, SDVO_CMD_SET_ENCODE, &mode, 1);
775 status = intel_sdvo_read_response(output, NULL, 0);
776
777 return (status == SDVO_CMD_STATUS_SUCCESS);
778}
779
780static bool intel_sdvo_set_colorimetry(struct intel_output *output,
781 uint8_t mode)
782{
783 uint8_t status;
784
785 intel_sdvo_write_cmd(output, SDVO_CMD_SET_COLORIMETRY, &mode, 1);
786 status = intel_sdvo_read_response(output, NULL, 0);
787
788 return (status == SDVO_CMD_STATUS_SUCCESS);
789}
790
791#if 0
792static void intel_sdvo_dump_hdmi_buf(struct intel_output *output)
793{
794 int i, j;
795 uint8_t set_buf_index[2];
796 uint8_t av_split;
797 uint8_t buf_size;
798 uint8_t buf[48];
799 uint8_t *pos;
800
801 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_AV_SPLIT, NULL, 0);
802 intel_sdvo_read_response(output, &av_split, 1);
803
804 for (i = 0; i <= av_split; i++) {
805 set_buf_index[0] = i; set_buf_index[1] = 0;
806 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX,
807 set_buf_index, 2);
808 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_INFO, NULL, 0);
809 intel_sdvo_read_response(output, &buf_size, 1);
810
811 pos = buf;
812 for (j = 0; j <= buf_size; j += 8) {
813 intel_sdvo_write_cmd(output, SDVO_CMD_GET_HBUF_DATA,
814 NULL, 0);
815 intel_sdvo_read_response(output, pos, 8);
816 pos += 8;
817 }
818 }
819}
820#endif
821
822static void intel_sdvo_set_hdmi_buf(struct intel_output *output, int index,
823 uint8_t *data, int8_t size, uint8_t tx_rate)
824{
825 uint8_t set_buf_index[2];
826
827 set_buf_index[0] = index;
828 set_buf_index[1] = 0;
829
830 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_INDEX, set_buf_index, 2);
831
832 for (; size > 0; size -= 8) {
833 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_DATA, data, 8);
834 data += 8;
835 }
836
837 intel_sdvo_write_cmd(output, SDVO_CMD_SET_HBUF_TXRATE, &tx_rate, 1);
838}
839
840static uint8_t intel_sdvo_calc_hbuf_csum(uint8_t *data, uint8_t size)
841{
842 uint8_t csum = 0;
843 int i;
844
845 for (i = 0; i < size; i++)
846 csum += data[i];
847
848 return 0x100 - csum;
849}
850
851#define DIP_TYPE_AVI 0x82
852#define DIP_VERSION_AVI 0x2
853#define DIP_LEN_AVI 13
854
855struct dip_infoframe {
856 uint8_t type;
857 uint8_t version;
858 uint8_t len;
859 uint8_t checksum;
860 union {
861 struct {
862 /* Packet Byte #1 */
863 uint8_t S:2;
864 uint8_t B:2;
865 uint8_t A:1;
866 uint8_t Y:2;
867 uint8_t rsvd1:1;
868 /* Packet Byte #2 */
869 uint8_t R:4;
870 uint8_t M:2;
871 uint8_t C:2;
872 /* Packet Byte #3 */
873 uint8_t SC:2;
874 uint8_t Q:2;
875 uint8_t EC:3;
876 uint8_t ITC:1;
877 /* Packet Byte #4 */
878 uint8_t VIC:7;
879 uint8_t rsvd2:1;
880 /* Packet Byte #5 */
881 uint8_t PR:4;
882 uint8_t rsvd3:4;
883 /* Packet Byte #6~13 */
884 uint16_t top_bar_end;
885 uint16_t bottom_bar_start;
886 uint16_t left_bar_end;
887 uint16_t right_bar_start;
888 } avi;
889 struct {
890 /* Packet Byte #1 */
891 uint8_t channel_count:3;
892 uint8_t rsvd1:1;
893 uint8_t coding_type:4;
894 /* Packet Byte #2 */
895 uint8_t sample_size:2; /* SS0, SS1 */
896 uint8_t sample_frequency:3;
897 uint8_t rsvd2:3;
898 /* Packet Byte #3 */
899 uint8_t coding_type_private:5;
900 uint8_t rsvd3:3;
901 /* Packet Byte #4 */
902 uint8_t channel_allocation;
903 /* Packet Byte #5 */
904 uint8_t rsvd4:3;
905 uint8_t level_shift:4;
906 uint8_t downmix_inhibit:1;
907 } audio;
908 uint8_t payload[28];
909 } __attribute__ ((packed)) u;
910} __attribute__((packed));
911
912static void intel_sdvo_set_avi_infoframe(struct intel_output *output,
913 struct drm_display_mode * mode)
914{
915 struct dip_infoframe avi_if = {
916 .type = DIP_TYPE_AVI,
917 .version = DIP_VERSION_AVI,
918 .len = DIP_LEN_AVI,
919 };
920
921 avi_if.checksum = intel_sdvo_calc_hbuf_csum((uint8_t *)&avi_if,
922 4 + avi_if.len);
923 intel_sdvo_set_hdmi_buf(output, 1, (uint8_t *)&avi_if, 4 + avi_if.len,
924 SDVO_HBUF_TX_VSYNC);
925}
926
927static bool intel_sdvo_mode_fixup(struct drm_encoder *encoder,
928 struct drm_display_mode *mode,
929 struct drm_display_mode *adjusted_mode)
930{
931 struct intel_output *output = enc_to_intel_output(encoder);
932 struct intel_sdvo_priv *dev_priv = output->dev_priv;
606 933
607 output_dtd.part2.sdvo_flags = 0; 934 if (!dev_priv->is_tv) {
608 output_dtd.part2.v_sync_off_high = v_sync_offset & 0xc0; 935 /* Make the CRTC code factor in the SDVO pixel multiplier. The
609 output_dtd.part2.reserved = 0; 936 * SDVO device will be told of the multiplier during mode_set.
937 */
938 adjusted_mode->clock *= intel_sdvo_get_pixel_multiplier(mode);
939 } else {
940 struct intel_sdvo_dtd output_dtd;
941 bool success;
942
943 /* We need to construct preferred input timings based on our
944 * output timings. To do that, we have to set the output
945 * timings, even though this isn't really the right place in
946 * the sequence to do it. Oh well.
947 */
948
949
950 /* Set output timings */
951 intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
952 intel_sdvo_set_target_output(output,
953 dev_priv->controlled_output);
954 intel_sdvo_set_output_timing(output, &output_dtd);
955
956 /* Set the input timing to the screen. Assume always input 0. */
957 intel_sdvo_set_target_input(output, true, false);
958
959
960 success = intel_sdvo_create_preferred_input_timing(output,
961 mode->clock / 10,
962 mode->hdisplay,
963 mode->vdisplay);
964 if (success) {
965 struct intel_sdvo_dtd input_dtd;
610 966
611 /* Set the output timing to the screen */ 967 intel_sdvo_get_preferred_input_timing(output,
612 intel_sdvo_set_target_output(intel_output, sdvo_priv->active_outputs); 968 &input_dtd);
613 intel_sdvo_set_output_timing(intel_output, &output_dtd); 969 intel_sdvo_get_mode_from_dtd(adjusted_mode, &input_dtd);
970
971 } else {
972 return false;
973 }
974 }
975 return true;
976}
977
978static void intel_sdvo_mode_set(struct drm_encoder *encoder,
979 struct drm_display_mode *mode,
980 struct drm_display_mode *adjusted_mode)
981{
982 struct drm_device *dev = encoder->dev;
983 struct drm_i915_private *dev_priv = dev->dev_private;
984 struct drm_crtc *crtc = encoder->crtc;
985 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
986 struct intel_output *output = enc_to_intel_output(encoder);
987 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
988 u32 sdvox = 0;
989 int sdvo_pixel_multiply;
990 struct intel_sdvo_in_out_map in_out;
991 struct intel_sdvo_dtd input_dtd;
992 u8 status;
993
994 if (!mode)
995 return;
996
997 /* First, set the input mapping for the first input to our controlled
998 * output. This is only correct if we're a single-input device, in
999 * which case the first input is the output from the appropriate SDVO
1000 * channel on the motherboard. In a two-input device, the first input
1001 * will be SDVOB and the second SDVOC.
1002 */
1003 in_out.in0 = sdvo_priv->controlled_output;
1004 in_out.in1 = 0;
1005
1006 intel_sdvo_write_cmd(output, SDVO_CMD_SET_IN_OUT_MAP,
1007 &in_out, sizeof(in_out));
1008 status = intel_sdvo_read_response(output, NULL, 0);
1009
1010 if (sdvo_priv->is_hdmi) {
1011 intel_sdvo_set_avi_infoframe(output, mode);
1012 sdvox |= SDVO_AUDIO_ENABLE;
1013 }
1014
1015 intel_sdvo_get_dtd_from_mode(&input_dtd, mode);
1016
1017 /* If it's a TV, we already set the output timing in mode_fixup.
1018 * Otherwise, the output timing is equal to the input timing.
1019 */
1020 if (!sdvo_priv->is_tv) {
1021 /* Set the output timing to the screen */
1022 intel_sdvo_set_target_output(output,
1023 sdvo_priv->controlled_output);
1024 intel_sdvo_set_output_timing(output, &input_dtd);
1025 }
614 1026
615 /* Set the input timing to the screen. Assume always input 0. */ 1027 /* Set the input timing to the screen. Assume always input 0. */
616 intel_sdvo_set_target_input(intel_output, true, false); 1028 intel_sdvo_set_target_input(output, true, false);
617 1029
618 /* We would like to use i830_sdvo_create_preferred_input_timing() to 1030 /* We would like to use intel_sdvo_create_preferred_input_timing() to
619 * provide the device with a timing it can support, if it supports that 1031 * provide the device with a timing it can support, if it supports that
620 * feature. However, presumably we would need to adjust the CRTC to 1032 * feature. However, presumably we would need to adjust the CRTC to
621 * output the preferred timing, and we don't support that currently. 1033 * output the preferred timing, and we don't support that currently.
622 */ 1034 */
623 intel_sdvo_set_input_timing(intel_output, &output_dtd); 1035#if 0
1036 success = intel_sdvo_create_preferred_input_timing(output, clock,
1037 width, height);
1038 if (success) {
1039 struct intel_sdvo_dtd *input_dtd;
1040
1041 intel_sdvo_get_preferred_input_timing(output, &input_dtd);
1042 intel_sdvo_set_input_timing(output, &input_dtd);
1043 }
1044#else
1045 intel_sdvo_set_input_timing(output, &input_dtd);
1046#endif
624 1047
625 switch (intel_sdvo_get_pixel_multiplier(mode)) { 1048 switch (intel_sdvo_get_pixel_multiplier(mode)) {
626 case 1: 1049 case 1:
627 intel_sdvo_set_clock_rate_mult(intel_output, 1050 intel_sdvo_set_clock_rate_mult(output,
628 SDVO_CLOCK_RATE_MULT_1X); 1051 SDVO_CLOCK_RATE_MULT_1X);
629 break; 1052 break;
630 case 2: 1053 case 2:
631 intel_sdvo_set_clock_rate_mult(intel_output, 1054 intel_sdvo_set_clock_rate_mult(output,
632 SDVO_CLOCK_RATE_MULT_2X); 1055 SDVO_CLOCK_RATE_MULT_2X);
633 break; 1056 break;
634 case 4: 1057 case 4:
635 intel_sdvo_set_clock_rate_mult(intel_output, 1058 intel_sdvo_set_clock_rate_mult(output,
636 SDVO_CLOCK_RATE_MULT_4X); 1059 SDVO_CLOCK_RATE_MULT_4X);
637 break; 1060 break;
638 } 1061 }
639 1062
640 /* Set the SDVO control regs. */ 1063 /* Set the SDVO control regs. */
641 if (0/*IS_I965GM(dev)*/) { 1064 if (IS_I965G(dev)) {
642 sdvox = SDVO_BORDER_ENABLE; 1065 sdvox |= SDVO_BORDER_ENABLE |
643 } else { 1066 SDVO_VSYNC_ACTIVE_HIGH |
644 sdvox = I915_READ(sdvo_priv->output_device); 1067 SDVO_HSYNC_ACTIVE_HIGH;
645 switch (sdvo_priv->output_device) { 1068 } else {
646 case SDVOB: 1069 sdvox |= I915_READ(sdvo_priv->output_device);
647 sdvox &= SDVOB_PRESERVE_MASK; 1070 switch (sdvo_priv->output_device) {
648 break; 1071 case SDVOB:
649 case SDVOC: 1072 sdvox &= SDVOB_PRESERVE_MASK;
650 sdvox &= SDVOC_PRESERVE_MASK; 1073 break;
651 break; 1074 case SDVOC:
652 } 1075 sdvox &= SDVOC_PRESERVE_MASK;
653 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE; 1076 break;
654 } 1077 }
1078 sdvox |= (9 << 19) | SDVO_BORDER_ENABLE;
1079 }
655 if (intel_crtc->pipe == 1) 1080 if (intel_crtc->pipe == 1)
656 sdvox |= SDVO_PIPE_B_SELECT; 1081 sdvox |= SDVO_PIPE_B_SELECT;
657 1082
658 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode); 1083 sdvo_pixel_multiply = intel_sdvo_get_pixel_multiplier(mode);
659 if (IS_I965G(dev)) { 1084 if (IS_I965G(dev)) {
660 /* done in crtc_mode_set as the dpll_md reg must be written 1085 /* done in crtc_mode_set as the dpll_md reg must be written early */
661 early */ 1086 } else if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev)) {
662 } else if (IS_I945G(dev) || IS_I945GM(dev)) { 1087 /* done in crtc_mode_set as it lives inside the dpll register */
663 /* done in crtc_mode_set as it lives inside the
664 dpll register */
665 } else { 1088 } else {
666 sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT; 1089 sdvox |= (sdvo_pixel_multiply - 1) << SDVO_PORT_MULTIPLY_SHIFT;
667 } 1090 }
668 1091
669 intel_sdvo_write_sdvox(intel_output, sdvox); 1092 intel_sdvo_write_sdvox(output, sdvox);
670} 1093}
671 1094
672static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode) 1095static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
@@ -714,7 +1137,7 @@ static void intel_sdvo_dpms(struct drm_encoder *encoder, int mode)
714 1137
715 if (0) 1138 if (0)
716 intel_sdvo_set_encoder_power_state(intel_output, mode); 1139 intel_sdvo_set_encoder_power_state(intel_output, mode);
717 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->active_outputs); 1140 intel_sdvo_set_active_outputs(intel_output, sdvo_priv->controlled_output);
718 } 1141 }
719 return; 1142 return;
720} 1143}
@@ -752,6 +1175,9 @@ static void intel_sdvo_save(struct drm_connector *connector)
752 &sdvo_priv->save_output_dtd[o]); 1175 &sdvo_priv->save_output_dtd[o]);
753 } 1176 }
754 } 1177 }
1178 if (sdvo_priv->is_tv) {
1179 /* XXX: Save TV format/enhancements. */
1180 }
755 1181
756 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device); 1182 sdvo_priv->save_SDVOX = I915_READ(sdvo_priv->output_device);
757} 1183}
@@ -759,7 +1185,6 @@ static void intel_sdvo_save(struct drm_connector *connector)
759static void intel_sdvo_restore(struct drm_connector *connector) 1185static void intel_sdvo_restore(struct drm_connector *connector)
760{ 1186{
761 struct drm_device *dev = connector->dev; 1187 struct drm_device *dev = connector->dev;
762 struct drm_i915_private *dev_priv = dev->dev_private;
763 struct intel_output *intel_output = to_intel_output(connector); 1188 struct intel_output *intel_output = to_intel_output(connector);
764 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 1189 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
765 int o; 1190 int o;
@@ -790,7 +1215,11 @@ static void intel_sdvo_restore(struct drm_connector *connector)
790 1215
791 intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult); 1216 intel_sdvo_set_clock_rate_mult(intel_output, sdvo_priv->save_sdvo_mult);
792 1217
793 I915_WRITE(sdvo_priv->output_device, sdvo_priv->save_SDVOX); 1218 if (sdvo_priv->is_tv) {
1219 /* XXX: Restore TV format/enhancements. */
1220 }
1221
1222 intel_sdvo_write_sdvox(intel_output, sdvo_priv->save_SDVOX);
794 1223
795 if (sdvo_priv->save_SDVOX & SDVO_ENABLE) 1224 if (sdvo_priv->save_SDVOX & SDVO_ENABLE)
796 { 1225 {
@@ -916,20 +1345,173 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
916 status = intel_sdvo_read_response(intel_output, &response, 2); 1345 status = intel_sdvo_read_response(intel_output, &response, 2);
917 1346
918 DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]); 1347 DRM_DEBUG("SDVO response %d %d\n", response[0], response[1]);
1348
1349 if (status != SDVO_CMD_STATUS_SUCCESS)
1350 return connector_status_unknown;
1351
919 if ((response[0] != 0) || (response[1] != 0)) 1352 if ((response[0] != 0) || (response[1] != 0))
920 return connector_status_connected; 1353 return connector_status_connected;
921 else 1354 else
922 return connector_status_disconnected; 1355 return connector_status_disconnected;
923} 1356}
924 1357
925static int intel_sdvo_get_modes(struct drm_connector *connector) 1358static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
926{ 1359{
927 struct intel_output *intel_output = to_intel_output(connector); 1360 struct intel_output *intel_output = to_intel_output(connector);
1361 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
928 1362
929 /* set the bus switch and get the modes */ 1363 /* set the bus switch and get the modes */
930 intel_sdvo_set_control_bus_switch(intel_output, SDVO_CONTROL_BUS_DDC2); 1364 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
931 intel_ddc_get_modes(intel_output); 1365 intel_ddc_get_modes(intel_output);
932 1366
1367#if 0
1368 struct drm_device *dev = encoder->dev;
1369 struct drm_i915_private *dev_priv = dev->dev_private;
1370 /* Mac mini hack. On this device, I get DDC through the analog, which
1371 * load-detects as disconnected. I fail to DDC through the SDVO DDC,
1372 * but it does load-detect as connected. So, just steal the DDC bits
1373 * from analog when we fail at finding it the right way.
1374 */
1375 crt = xf86_config->output[0];
1376 intel_output = crt->driver_private;
1377 if (intel_output->type == I830_OUTPUT_ANALOG &&
1378 crt->funcs->detect(crt) == XF86OutputStatusDisconnected) {
1379 I830I2CInit(pScrn, &intel_output->pDDCBus, GPIOA, "CRTDDC_A");
1380 edid_mon = xf86OutputGetEDID(crt, intel_output->pDDCBus);
1381 xf86DestroyI2CBusRec(intel_output->pDDCBus, true, true);
1382 }
1383 if (edid_mon) {
1384 xf86OutputSetEDID(output, edid_mon);
1385 modes = xf86OutputGetEDIDModes(output);
1386 }
1387#endif
1388}
1389
1390/**
1391 * This function checks the current TV format, and chooses a default if
1392 * it hasn't been set.
1393 */
1394static void
1395intel_sdvo_check_tv_format(struct intel_output *output)
1396{
1397 struct intel_sdvo_priv *dev_priv = output->dev_priv;
1398 struct intel_sdvo_tv_format format, unset;
1399 uint8_t status;
1400
1401 intel_sdvo_write_cmd(output, SDVO_CMD_GET_TV_FORMAT, NULL, 0);
1402 status = intel_sdvo_read_response(output, &format, sizeof(format));
1403 if (status != SDVO_CMD_STATUS_SUCCESS)
1404 return;
1405
1406 memset(&unset, 0, sizeof(unset));
1407 	if (!memcmp(&format, &unset, sizeof(format))) {
1408 DRM_DEBUG("%s: Choosing default TV format of NTSC-M\n",
1409 SDVO_NAME(dev_priv));
1410
1411 format.ntsc_m = true;
1412 		intel_sdvo_write_cmd(output, SDVO_CMD_SET_TV_FORMAT, &format, sizeof(format));
1413 status = intel_sdvo_read_response(output, NULL, 0);
1414 }
1415}
1416
1417/*
1418 * Set of SDVO TV modes.
1419 * Note! This is in reply order (see loop in get_tv_modes).
1420 * XXX: all 60Hz refresh?
1421 */
1422struct drm_display_mode sdvo_tv_modes[] = {
1423 { DRM_MODE("320x200", DRM_MODE_TYPE_DRIVER, 5815680, 321, 384, 416,
1424 200, 0, 232, 201, 233, 4196112, 0,
1425 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1426 { DRM_MODE("320x240", DRM_MODE_TYPE_DRIVER, 6814080, 321, 384, 416,
1427 240, 0, 272, 241, 273, 4196112, 0,
1428 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1429 { DRM_MODE("400x300", DRM_MODE_TYPE_DRIVER, 9910080, 401, 464, 496,
1430 300, 0, 332, 301, 333, 4196112, 0,
1431 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1432 { DRM_MODE("640x350", DRM_MODE_TYPE_DRIVER, 16913280, 641, 704, 736,
1433 350, 0, 382, 351, 383, 4196112, 0,
1434 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1435 { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736,
1436 400, 0, 432, 401, 433, 4196112, 0,
1437 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1438 { DRM_MODE("640x400", DRM_MODE_TYPE_DRIVER, 19121280, 641, 704, 736,
1439 400, 0, 432, 401, 433, 4196112, 0,
1440 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1441 { DRM_MODE("704x480", DRM_MODE_TYPE_DRIVER, 24624000, 705, 768, 800,
1442 480, 0, 512, 481, 513, 4196112, 0,
1443 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1444 { DRM_MODE("704x576", DRM_MODE_TYPE_DRIVER, 29232000, 705, 768, 800,
1445 576, 0, 608, 577, 609, 4196112, 0,
1446 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1447 { DRM_MODE("720x350", DRM_MODE_TYPE_DRIVER, 18751680, 721, 784, 816,
1448 350, 0, 382, 351, 383, 4196112, 0,
1449 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1450 { DRM_MODE("720x400", DRM_MODE_TYPE_DRIVER, 21199680, 721, 784, 816,
1451 400, 0, 432, 401, 433, 4196112, 0,
1452 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1453 { DRM_MODE("720x480", DRM_MODE_TYPE_DRIVER, 25116480, 721, 784, 816,
1454 480, 0, 512, 481, 513, 4196112, 0,
1455 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1456 { DRM_MODE("720x540", DRM_MODE_TYPE_DRIVER, 28054080, 721, 784, 816,
1457 540, 0, 572, 541, 573, 4196112, 0,
1458 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1459 { DRM_MODE("720x576", DRM_MODE_TYPE_DRIVER, 29816640, 721, 784, 816,
1460 576, 0, 608, 577, 609, 4196112, 0,
1461 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1462 { DRM_MODE("768x576", DRM_MODE_TYPE_DRIVER, 31570560, 769, 832, 864,
1463 576, 0, 608, 577, 609, 4196112, 0,
1464 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1465 { DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 34030080, 801, 864, 896,
1466 600, 0, 632, 601, 633, 4196112, 0,
1467 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1468 { DRM_MODE("832x624", DRM_MODE_TYPE_DRIVER, 36581760, 833, 896, 928,
1469 624, 0, 656, 625, 657, 4196112, 0,
1470 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1471 { DRM_MODE("920x766", DRM_MODE_TYPE_DRIVER, 48707040, 921, 984, 1016,
1472 766, 0, 798, 767, 799, 4196112, 0,
1473 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1474 { DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 53827200, 1025, 1088, 1120,
1475 768, 0, 800, 769, 801, 4196112, 0,
1476 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1477 { DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 87265920, 1281, 1344, 1376,
1478 1024, 0, 1056, 1025, 1057, 4196112, 0,
1479 DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
1480};
1481
1482static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1483{
1484 struct intel_output *output = to_intel_output(connector);
1485 uint32_t reply = 0;
1486 uint8_t status;
1487 int i = 0;
1488
1489 intel_sdvo_check_tv_format(output);
1490
1491 /* Read the list of supported input resolutions for the selected TV
1492 * format.
1493 */
1494 intel_sdvo_write_cmd(output, SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT,
1495 NULL, 0);
1496 status = intel_sdvo_read_response(output, &reply, 3);
1497 if (status != SDVO_CMD_STATUS_SUCCESS)
1498 return;
1499
1500 for (i = 0; i < ARRAY_SIZE(sdvo_tv_modes); i++)
1501 if (reply & (1 << i))
1502 drm_mode_probed_add(connector, &sdvo_tv_modes[i]);
1503}
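A minimal sketch of what the loop above relies on: bit i of the 3-byte GET_SDTV_RESOLUTION_SUPPORT reply selects sdvo_tv_modes[i], so the mode table must stay in the same order as the intel_sdvo_sdtv_resolution_reply bitfields added later in this patch. The explicit decode below is an illustration only; it assumes the little-endian bitfield layout the driver already depends on, and res is a hypothetical local:

	struct intel_sdvo_sdtv_resolution_reply res;

	/* View the same three reply bytes through the packed bitfield:
	 * res_320x200 is bit 0, res_320x240 is bit 1, and so on. */
	memcpy(&res, &reply, 3);
	if (res.res_800x600)		/* equivalent to reply & (1 << 14) */
		drm_mode_probed_add(connector, &sdvo_tv_modes[14]);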
1504
1505static int intel_sdvo_get_modes(struct drm_connector *connector)
1506{
1507 struct intel_output *output = to_intel_output(connector);
1508 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
1509
1510 if (sdvo_priv->is_tv)
1511 intel_sdvo_get_tv_modes(connector);
1512 else
1513 intel_sdvo_get_ddc_modes(connector);
1514
933 if (list_empty(&connector->probed_modes)) 1515 if (list_empty(&connector->probed_modes))
934 return 0; 1516 return 0;
935 return 1; 1517 return 1;
@@ -978,6 +1560,65 @@ static const struct drm_encoder_funcs intel_sdvo_enc_funcs = {
978}; 1560};
979 1561
980 1562
1563/**
1564 * Choose the appropriate DDC bus for control bus switch command for this
1565 * SDVO output based on the controlled output.
1566 *
1567 * DDC bus number assignment is in a priority order of RGB outputs, then TMDS
1568 * outputs, then LVDS outputs.
1569 */
1570static void
1571intel_sdvo_select_ddc_bus(struct intel_sdvo_priv *dev_priv)
1572{
1573 uint16_t mask = 0;
1574 unsigned int num_bits;
1575
1576 /* Make a mask of outputs less than or equal to our own priority in the
1577 * list.
1578 */
1579 switch (dev_priv->controlled_output) {
1580 case SDVO_OUTPUT_LVDS1:
1581 mask |= SDVO_OUTPUT_LVDS1;
1582 case SDVO_OUTPUT_LVDS0:
1583 mask |= SDVO_OUTPUT_LVDS0;
1584 case SDVO_OUTPUT_TMDS1:
1585 mask |= SDVO_OUTPUT_TMDS1;
1586 case SDVO_OUTPUT_TMDS0:
1587 mask |= SDVO_OUTPUT_TMDS0;
1588 case SDVO_OUTPUT_RGB1:
1589 mask |= SDVO_OUTPUT_RGB1;
1590 case SDVO_OUTPUT_RGB0:
1591 mask |= SDVO_OUTPUT_RGB0;
1592 break;
1593 }
1594
1595 /* Count bits to find what number we are in the priority list. */
1596 mask &= dev_priv->caps.output_flags;
1597 num_bits = hweight16(mask);
1598 if (num_bits > 3) {
1599 /* if more than 3 outputs, default to DDC bus 3 for now */
1600 num_bits = 3;
1601 }
1602
1603 /* Corresponds to SDVO_CONTROL_BUS_DDCx */
1604 dev_priv->ddc_bus = 1 << num_bits;
1605}
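A worked example of the priority mapping above, as a sketch (the capability flags are illustrative, not taken from any particular device):

	/* Suppose controlled_output == SDVO_OUTPUT_TMDS0 and the device
	 * reports caps.output_flags == (SDVO_OUTPUT_RGB0 | SDVO_OUTPUT_TMDS0).
	 * The fall-through switch builds mask = TMDS0 | RGB1 | RGB0; masking
	 * with the capabilities leaves RGB0 | TMDS0, so hweight16() returns 2
	 * and ddc_bus = 1 << 2, which is SDVO_CONTROL_BUS_DDC2 in the bit-flag
	 * encoding introduced in intel_sdvo_regs.h below. */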
1606
1607static bool
1608intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
1609{
1610 struct intel_sdvo_priv *sdvo_priv = output->dev_priv;
1611 uint8_t status;
1612
1613 intel_sdvo_set_target_output(output, sdvo_priv->controlled_output);
1614
1615 intel_sdvo_write_cmd(output, SDVO_CMD_GET_ENCODE, NULL, 0);
1616 status = intel_sdvo_read_response(output, &sdvo_priv->is_hdmi, 1);
1617 if (status != SDVO_CMD_STATUS_SUCCESS)
1618 return false;
1619 return true;
1620}
1621
981bool intel_sdvo_init(struct drm_device *dev, int output_device) 1622bool intel_sdvo_init(struct drm_device *dev, int output_device)
982{ 1623{
983 struct drm_connector *connector; 1624 struct drm_connector *connector;
@@ -1040,45 +1681,76 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1040 1681
1041 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); 1682 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
1042 1683
1043 memset(&sdvo_priv->active_outputs, 0, sizeof(sdvo_priv->active_outputs)); 1684 if (sdvo_priv->caps.output_flags &
1685 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_TMDS1)) {
1686 if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0)
1687 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS0;
1688 else
1689 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
1690
1691 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1692 encoder_type = DRM_MODE_ENCODER_TMDS;
1693 connector_type = DRM_MODE_CONNECTOR_DVID;
1044 1694
1045 /* TODO, CVBS, SVID, YPRPB & SCART outputs. */ 1695 if (intel_sdvo_get_supp_encode(intel_output,
1046 if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) 1696 &sdvo_priv->encode) &&
1697 intel_sdvo_get_digital_encoding_mode(intel_output) &&
1698 sdvo_priv->is_hdmi) {
1699 /* enable hdmi encoding mode if supported */
1700 intel_sdvo_set_encode(intel_output, SDVO_ENCODE_HDMI);
1701 intel_sdvo_set_colorimetry(intel_output,
1702 SDVO_COLORIMETRY_RGB256);
1703 connector_type = DRM_MODE_CONNECTOR_HDMIA;
1704 }
1705 }
1706 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0)
1047 { 1707 {
1048 sdvo_priv->active_outputs = SDVO_OUTPUT_RGB0; 1708 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
1709 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1710 encoder_type = DRM_MODE_ENCODER_TVDAC;
1711 connector_type = DRM_MODE_CONNECTOR_SVIDEO;
1712 sdvo_priv->is_tv = true;
1713 intel_output->needs_tv_clock = true;
1714 }
1715 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
1716 {
1717 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
1049 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1718 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1050 encoder_type = DRM_MODE_ENCODER_DAC; 1719 encoder_type = DRM_MODE_ENCODER_DAC;
1051 connector_type = DRM_MODE_CONNECTOR_VGA; 1720 connector_type = DRM_MODE_CONNECTOR_VGA;
1052 } 1721 }
1053 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) 1722 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1)
1054 { 1723 {
1055 sdvo_priv->active_outputs = SDVO_OUTPUT_RGB1; 1724 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
1056 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1725 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1057 encoder_type = DRM_MODE_ENCODER_DAC; 1726 encoder_type = DRM_MODE_ENCODER_DAC;
1058 connector_type = DRM_MODE_CONNECTOR_VGA; 1727 connector_type = DRM_MODE_CONNECTOR_VGA;
1059 } 1728 }
1060 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS0) 1729 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0)
1061 { 1730 {
1062 sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS0; 1731 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
1063 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1732 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1064 encoder_type = DRM_MODE_ENCODER_TMDS; 1733 encoder_type = DRM_MODE_ENCODER_LVDS;
1065 connector_type = DRM_MODE_CONNECTOR_DVID; 1734 connector_type = DRM_MODE_CONNECTOR_LVDS;
1066 } 1735 }
1067 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_TMDS1) 1736 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1)
1068 { 1737 {
1069 sdvo_priv->active_outputs = SDVO_OUTPUT_TMDS1; 1738 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
1070 connector->display_info.subpixel_order = SubPixelHorizontalRGB; 1739 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1071 encoder_type = DRM_MODE_ENCODER_TMDS; 1740 encoder_type = DRM_MODE_ENCODER_LVDS;
1072 connector_type = DRM_MODE_CONNECTOR_DVID; 1741 connector_type = DRM_MODE_CONNECTOR_LVDS;
1073 } 1742 }
1074 else 1743 else
1075 { 1744 {
1076 unsigned char bytes[2]; 1745 unsigned char bytes[2];
1077 1746
1747 sdvo_priv->controlled_output = 0;
1078 memcpy (bytes, &sdvo_priv->caps.output_flags, 2); 1748 memcpy (bytes, &sdvo_priv->caps.output_flags, 2);
1079 DRM_DEBUG("%s: No active RGB or TMDS outputs (0x%02x%02x)\n", 1749 DRM_DEBUG("%s: Unknown SDVO output type (0x%02x%02x)\n",
1080 SDVO_NAME(sdvo_priv), 1750 SDVO_NAME(sdvo_priv),
1081 bytes[0], bytes[1]); 1751 bytes[0], bytes[1]);
1752 encoder_type = DRM_MODE_ENCODER_NONE;
1753 connector_type = DRM_MODE_CONNECTOR_Unknown;
1082 goto err_i2c; 1754 goto err_i2c;
1083 } 1755 }
1084 1756
@@ -1089,6 +1761,8 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1089 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1761 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
1090 drm_sysfs_connector_add(connector); 1762 drm_sysfs_connector_add(connector);
1091 1763
1764 intel_sdvo_select_ddc_bus(sdvo_priv);
1765
1092 /* Set the input timing to the screen. Assume always input 0. */ 1766 /* Set the input timing to the screen. Assume always input 0. */
1093 intel_sdvo_set_target_input(intel_output, true, false); 1767 intel_sdvo_set_target_input(intel_output, true, false);
1094 1768
diff --git a/drivers/gpu/drm/i915/intel_sdvo_regs.h b/drivers/gpu/drm/i915/intel_sdvo_regs.h
index 861a43f8693c..1117b9c151a6 100644
--- a/drivers/gpu/drm/i915/intel_sdvo_regs.h
+++ b/drivers/gpu/drm/i915/intel_sdvo_regs.h
@@ -173,6 +173,9 @@ struct intel_sdvo_get_trained_inputs_response {
173 * Returns two struct intel_sdvo_output_flags structures. 173 * Returns two struct intel_sdvo_output_flags structures.
174 */ 174 */
175#define SDVO_CMD_GET_IN_OUT_MAP 0x06 175#define SDVO_CMD_GET_IN_OUT_MAP 0x06
176struct intel_sdvo_in_out_map {
177 u16 in0, in1;
178};
176 179
177/** 180/**
178 * Sets the current mapping of SDVO inputs to outputs on the device. 181 * Sets the current mapping of SDVO inputs to outputs on the device.
@@ -206,7 +209,8 @@ struct intel_sdvo_get_trained_inputs_response {
206struct intel_sdvo_get_interrupt_event_source_response { 209struct intel_sdvo_get_interrupt_event_source_response {
207 u16 interrupt_status; 210 u16 interrupt_status;
208 unsigned int ambient_light_interrupt:1; 211 unsigned int ambient_light_interrupt:1;
209 unsigned int pad:7; 212 unsigned int hdmi_audio_encrypt_change:1;
213 unsigned int pad:6;
210} __attribute__((packed)); 214} __attribute__((packed));
211 215
212/** 216/**
@@ -305,23 +309,411 @@ struct intel_sdvo_set_target_input_args {
305# define SDVO_CLOCK_RATE_MULT_4X (1 << 3) 309# define SDVO_CLOCK_RATE_MULT_4X (1 << 3)
306 310
307#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27 311#define SDVO_CMD_GET_SUPPORTED_TV_FORMATS 0x27
312/** 5 bytes of bit flags for TV formats shared by all TV format functions */
313struct intel_sdvo_tv_format {
314 unsigned int ntsc_m:1;
315 unsigned int ntsc_j:1;
316 unsigned int ntsc_443:1;
317 unsigned int pal_b:1;
318 unsigned int pal_d:1;
319 unsigned int pal_g:1;
320 unsigned int pal_h:1;
321 unsigned int pal_i:1;
322
323 unsigned int pal_m:1;
324 unsigned int pal_n:1;
325 unsigned int pal_nc:1;
326 unsigned int pal_60:1;
327 unsigned int secam_b:1;
328 unsigned int secam_d:1;
329 unsigned int secam_g:1;
330 unsigned int secam_k:1;
331
332 unsigned int secam_k1:1;
333 unsigned int secam_l:1;
334 unsigned int secam_60:1;
335 unsigned int hdtv_std_smpte_240m_1080i_59:1;
336 unsigned int hdtv_std_smpte_240m_1080i_60:1;
337 unsigned int hdtv_std_smpte_260m_1080i_59:1;
338 unsigned int hdtv_std_smpte_260m_1080i_60:1;
339 unsigned int hdtv_std_smpte_274m_1080i_50:1;
340
341 unsigned int hdtv_std_smpte_274m_1080i_59:1;
342 unsigned int hdtv_std_smpte_274m_1080i_60:1;
343 unsigned int hdtv_std_smpte_274m_1080p_23:1;
344 unsigned int hdtv_std_smpte_274m_1080p_24:1;
345 unsigned int hdtv_std_smpte_274m_1080p_25:1;
346 unsigned int hdtv_std_smpte_274m_1080p_29:1;
347 unsigned int hdtv_std_smpte_274m_1080p_30:1;
348 unsigned int hdtv_std_smpte_274m_1080p_50:1;
349
350 unsigned int hdtv_std_smpte_274m_1080p_59:1;
351 unsigned int hdtv_std_smpte_274m_1080p_60:1;
352 unsigned int hdtv_std_smpte_295m_1080i_50:1;
353 unsigned int hdtv_std_smpte_295m_1080p_50:1;
354 unsigned int hdtv_std_smpte_296m_720p_59:1;
355 unsigned int hdtv_std_smpte_296m_720p_60:1;
356 unsigned int hdtv_std_smpte_296m_720p_50:1;
357 unsigned int hdtv_std_smpte_293m_480p_59:1;
358
359 unsigned int hdtv_std_smpte_170m_480i_59:1;
360 unsigned int hdtv_std_iturbt601_576i_50:1;
361 unsigned int hdtv_std_iturbt601_576p_50:1;
362 unsigned int hdtv_std_eia_7702a_480i_60:1;
363 unsigned int hdtv_std_eia_7702a_480p_60:1;
364 unsigned int pad:3;
365} __attribute__((packed));
308 366
309#define SDVO_CMD_GET_TV_FORMAT 0x28 367#define SDVO_CMD_GET_TV_FORMAT 0x28
310 368
311#define SDVO_CMD_SET_TV_FORMAT 0x29 369#define SDVO_CMD_SET_TV_FORMAT 0x29
312 370
371/** Returns the resolutions that can be used with the given TV format */
372#define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT 0x83
373struct intel_sdvo_sdtv_resolution_request {
374 unsigned int ntsc_m:1;
375 unsigned int ntsc_j:1;
376 unsigned int ntsc_443:1;
377 unsigned int pal_b:1;
378 unsigned int pal_d:1;
379 unsigned int pal_g:1;
380 unsigned int pal_h:1;
381 unsigned int pal_i:1;
382
383 unsigned int pal_m:1;
384 unsigned int pal_n:1;
385 unsigned int pal_nc:1;
386 unsigned int pal_60:1;
387 unsigned int secam_b:1;
388 unsigned int secam_d:1;
389 unsigned int secam_g:1;
390 unsigned int secam_k:1;
391
392 unsigned int secam_k1:1;
393 unsigned int secam_l:1;
394 unsigned int secam_60:1;
395 unsigned int pad:5;
396} __attribute__((packed));
397
398struct intel_sdvo_sdtv_resolution_reply {
399 unsigned int res_320x200:1;
400 unsigned int res_320x240:1;
401 unsigned int res_400x300:1;
402 unsigned int res_640x350:1;
403 unsigned int res_640x400:1;
404 unsigned int res_640x480:1;
405 unsigned int res_704x480:1;
406 unsigned int res_704x576:1;
407
408 unsigned int res_720x350:1;
409 unsigned int res_720x400:1;
410 unsigned int res_720x480:1;
411 unsigned int res_720x540:1;
412 unsigned int res_720x576:1;
413 unsigned int res_768x576:1;
414 unsigned int res_800x600:1;
415 unsigned int res_832x624:1;
416
417 unsigned int res_920x766:1;
418 unsigned int res_1024x768:1;
419 unsigned int res_1280x1024:1;
420 unsigned int pad:5;
421} __attribute__((packed));
422
423/* Get supported resolutions with square pixel aspect ratio that can be
424 scaled for the requested HDTV format */
425#define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT 0x85
426
427struct intel_sdvo_hdtv_resolution_request {
428 unsigned int hdtv_std_smpte_240m_1080i_59:1;
429 unsigned int hdtv_std_smpte_240m_1080i_60:1;
430 unsigned int hdtv_std_smpte_260m_1080i_59:1;
431 unsigned int hdtv_std_smpte_260m_1080i_60:1;
432 unsigned int hdtv_std_smpte_274m_1080i_50:1;
433 unsigned int hdtv_std_smpte_274m_1080i_59:1;
434 unsigned int hdtv_std_smpte_274m_1080i_60:1;
435 unsigned int hdtv_std_smpte_274m_1080p_23:1;
436
437 unsigned int hdtv_std_smpte_274m_1080p_24:1;
438 unsigned int hdtv_std_smpte_274m_1080p_25:1;
439 unsigned int hdtv_std_smpte_274m_1080p_29:1;
440 unsigned int hdtv_std_smpte_274m_1080p_30:1;
441 unsigned int hdtv_std_smpte_274m_1080p_50:1;
442 unsigned int hdtv_std_smpte_274m_1080p_59:1;
443 unsigned int hdtv_std_smpte_274m_1080p_60:1;
444 unsigned int hdtv_std_smpte_295m_1080i_50:1;
445
446 unsigned int hdtv_std_smpte_295m_1080p_50:1;
447 unsigned int hdtv_std_smpte_296m_720p_59:1;
448 unsigned int hdtv_std_smpte_296m_720p_60:1;
449 unsigned int hdtv_std_smpte_296m_720p_50:1;
450 unsigned int hdtv_std_smpte_293m_480p_59:1;
451 unsigned int hdtv_std_smpte_170m_480i_59:1;
452 unsigned int hdtv_std_iturbt601_576i_50:1;
453 unsigned int hdtv_std_iturbt601_576p_50:1;
454
455 unsigned int hdtv_std_eia_7702a_480i_60:1;
456 unsigned int hdtv_std_eia_7702a_480p_60:1;
457 unsigned int pad:6;
458} __attribute__((packed));
459
460struct intel_sdvo_hdtv_resolution_reply {
461 unsigned int res_640x480:1;
462 unsigned int res_800x600:1;
463 unsigned int res_1024x768:1;
464 unsigned int res_1280x960:1;
465 unsigned int res_1400x1050:1;
466 unsigned int res_1600x1200:1;
467 unsigned int res_1920x1440:1;
468 unsigned int res_2048x1536:1;
469
470 unsigned int res_2560x1920:1;
471 unsigned int res_3200x2400:1;
472 unsigned int res_3840x2880:1;
473 unsigned int pad1:5;
474
475 unsigned int res_848x480:1;
476 unsigned int res_1064x600:1;
477 unsigned int res_1280x720:1;
478 unsigned int res_1360x768:1;
479 unsigned int res_1704x960:1;
480 unsigned int res_1864x1050:1;
481 unsigned int res_1920x1080:1;
482 unsigned int res_2128x1200:1;
483
484 unsigned int res_2560x1400:1;
485 unsigned int res_2728x1536:1;
486 unsigned int res_3408x1920:1;
487 unsigned int res_4264x2400:1;
488 unsigned int res_5120x2880:1;
489 unsigned int pad2:3;
490
491 unsigned int res_768x480:1;
492 unsigned int res_960x600:1;
493 unsigned int res_1152x720:1;
494 unsigned int res_1124x768:1;
495 unsigned int res_1536x960:1;
496 unsigned int res_1680x1050:1;
497 unsigned int res_1728x1080:1;
498 unsigned int res_1920x1200:1;
499
500 unsigned int res_2304x1440:1;
501 unsigned int res_2456x1536:1;
502 unsigned int res_3072x1920:1;
503 unsigned int res_3840x2400:1;
504 unsigned int res_4608x2880:1;
505 unsigned int pad3:3;
506
507 unsigned int res_1280x1024:1;
508 unsigned int pad4:7;
509
510 unsigned int res_1280x768:1;
511 unsigned int pad5:7;
512} __attribute__((packed));
513
514/* Get supported power state returns info for encoder and monitor, rely on
515 last SetTargetInput and SetTargetOutput calls */
313#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a 516#define SDVO_CMD_GET_SUPPORTED_POWER_STATES 0x2a
517/* Get power state returns info for encoder and monitor, rely on last
518 SetTargetInput and SetTargetOutput calls */
519#define SDVO_CMD_GET_POWER_STATE 0x2b
314#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b 520#define SDVO_CMD_GET_ENCODER_POWER_STATE 0x2b
315#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c 521#define SDVO_CMD_SET_ENCODER_POWER_STATE 0x2c
316# define SDVO_ENCODER_STATE_ON (1 << 0) 522# define SDVO_ENCODER_STATE_ON (1 << 0)
317# define SDVO_ENCODER_STATE_STANDBY (1 << 1) 523# define SDVO_ENCODER_STATE_STANDBY (1 << 1)
318# define SDVO_ENCODER_STATE_SUSPEND (1 << 2) 524# define SDVO_ENCODER_STATE_SUSPEND (1 << 2)
319# define SDVO_ENCODER_STATE_OFF (1 << 3) 525# define SDVO_ENCODER_STATE_OFF (1 << 3)
526# define SDVO_MONITOR_STATE_ON (1 << 4)
527# define SDVO_MONITOR_STATE_STANDBY (1 << 5)
528# define SDVO_MONITOR_STATE_SUSPEND (1 << 6)
529# define SDVO_MONITOR_STATE_OFF (1 << 7)
530
531#define SDVO_CMD_GET_MAX_PANEL_POWER_SEQUENCING 0x2d
532#define SDVO_CMD_GET_PANEL_POWER_SEQUENCING 0x2e
533#define SDVO_CMD_SET_PANEL_POWER_SEQUENCING 0x2f
534/**
535 * The panel power sequencing parameters are in units of milliseconds.
536 * The high fields are bits 8:9 of the 10-bit values.
537 */
538struct sdvo_panel_power_sequencing {
539 u8 t0;
540 u8 t1;
541 u8 t2;
542 u8 t3;
543 u8 t4;
544
545 unsigned int t0_high:2;
546 unsigned int t1_high:2;
547 unsigned int t2_high:2;
548 unsigned int t3_high:2;
549
550 unsigned int t4_high:2;
551 unsigned int pad:6;
552} __attribute__((packed));
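Since each delay is split across a u8 base field and a 2-bit high field, reading a value back means recombining the two pieces; a minimal sketch, assuming seq has been filled by a GET_PANEL_POWER_SEQUENCING read:

	struct sdvo_panel_power_sequencing seq;
	unsigned int t0_ms;

	/* Each delay is a 10-bit value in milliseconds: the u8 field carries
	 * bits 0:7, the matching *_high bitfield carries bits 8:9. */
	t0_ms = seq.t0 | (seq.t0_high << 8);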
553
554#define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL 0x30
555struct sdvo_max_backlight_reply {
556 u8 max_value;
557 u8 default_value;
558} __attribute__((packed));
559
560#define SDVO_CMD_GET_BACKLIGHT_LEVEL 0x31
561#define SDVO_CMD_SET_BACKLIGHT_LEVEL 0x32
562
563#define SDVO_CMD_GET_AMBIENT_LIGHT 0x33
564struct sdvo_get_ambient_light_reply {
565 u16 trip_low;
566 u16 trip_high;
567 u16 value;
568} __attribute__((packed));
569#define SDVO_CMD_SET_AMBIENT_LIGHT 0x34
570struct sdvo_set_ambient_light_reply {
571 u16 trip_low;
572 u16 trip_high;
573 unsigned int enable:1;
574 unsigned int pad:7;
575} __attribute__((packed));
576
577/* Set display power state */
578#define SDVO_CMD_SET_DISPLAY_POWER_STATE 0x7d
579# define SDVO_DISPLAY_STATE_ON (1 << 0)
580# define SDVO_DISPLAY_STATE_STANDBY (1 << 1)
581# define SDVO_DISPLAY_STATE_SUSPEND (1 << 2)
582# define SDVO_DISPLAY_STATE_OFF (1 << 3)
583
584#define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS 0x84
585struct intel_sdvo_enhancements_reply {
586 unsigned int flicker_filter:1;
587 unsigned int flicker_filter_adaptive:1;
588 unsigned int flicker_filter_2d:1;
589 unsigned int saturation:1;
590 unsigned int hue:1;
591 unsigned int brightness:1;
592 unsigned int contrast:1;
593 unsigned int overscan_h:1;
594
595 unsigned int overscan_v:1;
596 unsigned int position_h:1;
597 unsigned int position_v:1;
598 unsigned int sharpness:1;
599 unsigned int dot_crawl:1;
600 unsigned int dither:1;
601 unsigned int max_tv_chroma_filter:1;
602 unsigned int max_tv_luma_filter:1;
603} __attribute__((packed));
604
605/* Picture enhancement limits below are dependent on the current TV format,
606 * and thus need to be queried and set after it.
607 */
608#define SDVO_CMD_GET_MAX_FLICKER_FITER 0x4d
609#define SDVO_CMD_GET_MAX_ADAPTIVE_FLICKER_FITER 0x7b
610#define SDVO_CMD_GET_MAX_2D_FLICKER_FITER 0x52
611#define SDVO_CMD_GET_MAX_SATURATION 0x55
612#define SDVO_CMD_GET_MAX_HUE 0x58
613#define SDVO_CMD_GET_MAX_BRIGHTNESS 0x5b
614#define SDVO_CMD_GET_MAX_CONTRAST 0x5e
615#define SDVO_CMD_GET_MAX_OVERSCAN_H 0x61
616#define SDVO_CMD_GET_MAX_OVERSCAN_V 0x64
617#define SDVO_CMD_GET_MAX_POSITION_H 0x67
618#define SDVO_CMD_GET_MAX_POSITION_V 0x6a
619#define SDVO_CMD_GET_MAX_SHARPNESS_V 0x6d
620#define SDVO_CMD_GET_MAX_TV_CHROMA 0x74
621#define SDVO_CMD_GET_MAX_TV_LUMA 0x77
622struct intel_sdvo_enhancement_limits_reply {
623 u16 max_value;
624 u16 default_value;
625} __attribute__((packed));
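Because these limits change with the TV format, a user-supplied enhancement value has to be clamped against a freshly queried maximum; a sketch using the command helpers already present in this driver (output and requested are hypothetical, and the query is assumed to run after SET_TV_FORMAT):

	struct intel_sdvo_enhancement_limits_reply limits;
	struct intel_sdvo_enhancements_arg arg;
	uint8_t status;

	intel_sdvo_write_cmd(output, SDVO_CMD_GET_MAX_BRIGHTNESS, NULL, 0);
	status = intel_sdvo_read_response(output, &limits, sizeof(limits));
	if (status == SDVO_CMD_STATUS_SUCCESS) {
		arg.value = requested;
		if (arg.value > limits.max_value)
			arg.value = limits.max_value;
		intel_sdvo_write_cmd(output, SDVO_CMD_SET_BRIGHTNESS,
				     &arg, sizeof(arg));
	}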
320 626
321#define SDVO_CMD_SET_TV_RESOLUTION_SUPPORT 0x93 627#define SDVO_CMD_GET_LVDS_PANEL_INFORMATION 0x7f
628#define SDVO_CMD_SET_LVDS_PANEL_INFORMATION 0x80
629# define SDVO_LVDS_COLOR_DEPTH_18 (0 << 0)
630# define SDVO_LVDS_COLOR_DEPTH_24 (1 << 0)
631# define SDVO_LVDS_CONNECTOR_SPWG (0 << 2)
632# define SDVO_LVDS_CONNECTOR_OPENLDI (1 << 2)
633# define SDVO_LVDS_SINGLE_CHANNEL (0 << 4)
634# define SDVO_LVDS_DUAL_CHANNEL (1 << 4)
635
636#define SDVO_CMD_GET_FLICKER_FILTER 0x4e
637#define SDVO_CMD_SET_FLICKER_FILTER 0x4f
638#define SDVO_CMD_GET_ADAPTIVE_FLICKER_FITER 0x50
639#define SDVO_CMD_SET_ADAPTIVE_FLICKER_FITER 0x51
640#define SDVO_CMD_GET_2D_FLICKER_FITER 0x53
641#define SDVO_CMD_SET_2D_FLICKER_FITER 0x54
642#define SDVO_CMD_GET_SATURATION 0x56
643#define SDVO_CMD_SET_SATURATION 0x57
644#define SDVO_CMD_GET_HUE 0x59
645#define SDVO_CMD_SET_HUE 0x5a
646#define SDVO_CMD_GET_BRIGHTNESS 0x5c
647#define SDVO_CMD_SET_BRIGHTNESS 0x5d
648#define SDVO_CMD_GET_CONTRAST 0x5f
649#define SDVO_CMD_SET_CONTRAST 0x60
650#define SDVO_CMD_GET_OVERSCAN_H 0x62
651#define SDVO_CMD_SET_OVERSCAN_H 0x63
652#define SDVO_CMD_GET_OVERSCAN_V 0x65
653#define SDVO_CMD_SET_OVERSCAN_V 0x66
654#define SDVO_CMD_GET_POSITION_H 0x68
655#define SDVO_CMD_SET_POSITION_H 0x69
656#define SDVO_CMD_GET_POSITION_V 0x6b
657#define SDVO_CMD_SET_POSITION_V 0x6c
658#define SDVO_CMD_GET_SHARPNESS 0x6e
659#define SDVO_CMD_SET_SHARPNESS 0x6f
660#define SDVO_CMD_GET_TV_CHROMA 0x75
661#define SDVO_CMD_SET_TV_CHROMA 0x76
662#define SDVO_CMD_GET_TV_LUMA 0x78
663#define SDVO_CMD_SET_TV_LUMA 0x79
664struct intel_sdvo_enhancements_arg {
665 u16 value;
666}__attribute__((packed));
667
668#define SDVO_CMD_GET_DOT_CRAWL 0x70
669#define SDVO_CMD_SET_DOT_CRAWL 0x71
670# define SDVO_DOT_CRAWL_ON (1 << 0)
671# define SDVO_DOT_CRAWL_DEFAULT_ON (1 << 1)
672
673#define SDVO_CMD_GET_DITHER 0x72
674#define SDVO_CMD_SET_DITHER 0x73
675# define SDVO_DITHER_ON (1 << 0)
676# define SDVO_DITHER_DEFAULT_ON (1 << 1)
322 677
323#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a 678#define SDVO_CMD_SET_CONTROL_BUS_SWITCH 0x7a
324# define SDVO_CONTROL_BUS_PROM 0x0 679# define SDVO_CONTROL_BUS_PROM (1 << 0)
325# define SDVO_CONTROL_BUS_DDC1 0x1 680# define SDVO_CONTROL_BUS_DDC1 (1 << 1)
326# define SDVO_CONTROL_BUS_DDC2 0x2 681# define SDVO_CONTROL_BUS_DDC2 (1 << 2)
327# define SDVO_CONTROL_BUS_DDC3 0x3 682# define SDVO_CONTROL_BUS_DDC3 (1 << 3)
683
684/* HDMI op codes */
685#define SDVO_CMD_GET_SUPP_ENCODE 0x9d
686#define SDVO_CMD_GET_ENCODE 0x9e
687#define SDVO_CMD_SET_ENCODE 0x9f
688 #define SDVO_ENCODE_DVI 0x0
689 #define SDVO_ENCODE_HDMI 0x1
690#define SDVO_CMD_SET_PIXEL_REPLI 0x8b
691#define SDVO_CMD_GET_PIXEL_REPLI 0x8c
692#define SDVO_CMD_GET_COLORIMETRY_CAP 0x8d
693#define SDVO_CMD_SET_COLORIMETRY 0x8e
694 #define SDVO_COLORIMETRY_RGB256 0x0
695 #define SDVO_COLORIMETRY_RGB220 0x1
696 #define SDVO_COLORIMETRY_YCrCb422 0x3
697 #define SDVO_COLORIMETRY_YCrCb444 0x4
698#define SDVO_CMD_GET_COLORIMETRY 0x8f
699#define SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER 0x90
700#define SDVO_CMD_SET_AUDIO_STAT 0x91
701#define SDVO_CMD_GET_AUDIO_STAT 0x92
702#define SDVO_CMD_SET_HBUF_INDEX 0x93
703#define SDVO_CMD_GET_HBUF_INDEX 0x94
704#define SDVO_CMD_GET_HBUF_INFO 0x95
705#define SDVO_CMD_SET_HBUF_AV_SPLIT 0x96
706#define SDVO_CMD_GET_HBUF_AV_SPLIT 0x97
707#define SDVO_CMD_SET_HBUF_DATA 0x98
708#define SDVO_CMD_GET_HBUF_DATA 0x99
709#define SDVO_CMD_SET_HBUF_TXRATE 0x9a
710#define SDVO_CMD_GET_HBUF_TXRATE 0x9b
711 #define SDVO_HBUF_TX_DISABLED (0 << 6)
712 #define SDVO_HBUF_TX_ONCE (2 << 6)
713 #define SDVO_HBUF_TX_VSYNC (3 << 6)
714#define SDVO_CMD_GET_AUDIO_TX_INFO 0x9c
715
716struct intel_sdvo_encode{
717 u8 dvi_rev;
718 u8 hdmi_rev;
719} __attribute__ ((packed));
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index fbb35dc56f5c..56485d67369b 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -411,7 +411,7 @@ struct tv_mode {
411 * These values account for -1s required. 411 * These values account for -1s required.
412 */ 412 */
413 413
414const static struct tv_mode tv_modes[] = { 414static const struct tv_mode tv_modes[] = {
415 { 415 {
416 .name = "NTSC-M", 416 .name = "NTSC-M",
417 .clock = 107520, 417 .clock = 107520,