path: root/drivers/gpu
author		David S. Miller <davem@davemloft.net>	2009-06-15 06:02:23 -0400
committer	David S. Miller <davem@davemloft.net>	2009-06-15 06:02:23 -0400
commit		9cbc1cb8cd46ce1f7645b9de249b2ce8460129bb (patch)
tree		8d104ec2a459346b99413b0b77421ca7b9936c1a /drivers/gpu
parent		ca44d6e60f9de26281fda203f58b570e1748c015 (diff)
parent		45e3e1935e2857c54783291107d33323b3ef33c8 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	Documentation/feature-removal-schedule.txt
	drivers/scsi/fcoe/fcoe.c
	net/core/drop_monitor.c
	net/core/net-traces.c
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/Kconfig                   14
-rw-r--r--  drivers/gpu/drm/drm_bufs.c                11
-rw-r--r--  drivers/gpu/drm/drm_crtc.c                 7
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c        109
-rw-r--r--  drivers/gpu/drm/drm_drv.c                  2
-rw-r--r--  drivers/gpu/drm/drm_edid.c                79
-rw-r--r--  drivers/gpu/drm/drm_gem.c                  2
-rw-r--r--  drivers/gpu/drm/drm_hashtab.c              4
-rw-r--r--  drivers/gpu/drm/drm_irq.c                  8
-rw-r--r--  drivers/gpu/drm/drm_mm.c                 165
-rw-r--r--  drivers/gpu/drm/drm_modes.c               18
-rw-r--r--  drivers/gpu/drm/drm_stub.c                17
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c                7
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c           79
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h           51
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          221
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c   166
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          190
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h          636
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c       20
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.c        188
-rw-r--r--  drivers/gpu/drm/i915/intel_bios.h        118
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c         231
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     671
-rw-r--r--  drivers/gpu/drm/i915/intel_dvo.c           1
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c           26
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c         34
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c        163
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c        248
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c            4
-rw-r--r--  drivers/gpu/drm/radeon/r600_cp.c          42
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cp.c         6
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.h        6
-rw-r--r--  drivers/gpu/drm/via/via_dmablit.c          6
34 files changed, 3075 insertions(+), 475 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 4cd35d8fd799..f5d46e7199d4 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -67,12 +67,18 @@ config DRM_I830
 	  will load the correct one.
 
 config DRM_I915
+	tristate "i915 driver"
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EMBEDDED
-	tristate "i915 driver"
+	# i915 depends on ACPI_VIDEO when ACPI is enabled
+	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select VIDEO_OUTPUT_CONTROL if ACPI
+	select BACKLIGHT_CLASS_DEVICE if ACPI
+	select INPUT if ACPI
+	select ACPI_VIDEO if ACPI
 	help
 	  Choose this option if you have a system that has Intel 830M, 845G,
 	  852GM, 855GM 865G or 915G integrated graphics. If M is selected, the
@@ -84,12 +90,6 @@ config DRM_I915
 config DRM_I915_KMS
 	bool "Enable modesetting on intel by default"
 	depends on DRM_I915
-	# i915 KMS depends on ACPI_VIDEO when ACPI is enabled
-	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
-	select VIDEO_OUTPUT_CONTROL if ACPI
-	select BACKLIGHT_CLASS_DEVICE if ACPI
-	select INPUT if ACPI
-	select ACPI_VIDEO if ACPI
 	help
 	  Choose this option if you want kernel modesetting enabled by default,
 	  and you have a new enough userspace to support this. Running old
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 6d80d17f1e96..80a257554b30 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -170,6 +170,14 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 	}
 	DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
 		  (unsigned long long)map->offset, map->size, map->type);
+
+	/* page-align _DRM_SHM maps. They are allocated here so there is no security
+	 * hole created by that and it works around various broken drivers that use
+	 * a non-aligned quantity to map the SAREA. --BenH
+	 */
+	if (map->type == _DRM_SHM)
+		map->size = PAGE_ALIGN(map->size);
+
 	if ((map->offset & (~(resource_size_t)PAGE_MASK)) || (map->size & (~PAGE_MASK))) {
 		drm_free(map, sizeof(*map), DRM_MEM_MAPS);
 		return -EINVAL;
@@ -363,7 +371,8 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 	list->user_token = list->hash.key << PAGE_SHIFT;
 	mutex_unlock(&dev->struct_mutex);
 
-	list->master = dev->primary->master;
+	if (!(map->flags & _DRM_DRIVER))
+		list->master = dev->primary->master;
 	*maplist = list;
 	return 0;
 }
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 94a768871734..8fab7890a363 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -2294,7 +2294,12 @@ int drm_mode_connector_property_set_ioctl(struct drm_device *dev,
 		}
 	}
 
-	if (connector->funcs->set_property)
+	/* Do DPMS ourselves */
+	if (property == connector->dev->mode_config.dpms_property) {
+		if (connector->funcs->dpms)
+			(*connector->funcs->dpms)(connector, (int) out_resp->value);
+		ret = 0;
+	} else if (connector->funcs->set_property)
 		ret = connector->funcs->set_property(connector, property, out_resp->value);
 
 	/* store the property value if succesful */
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 45890447feec..a6f73f1e99d9 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -199,6 +199,29 @@ static void drm_helper_add_std_modes(struct drm_device *dev,
 }
 
 /**
+ * drm_helper_encoder_in_use - check if a given encoder is in use
+ * @encoder: encoder to check
+ *
+ * LOCKING:
+ * Caller must hold mode config lock.
+ *
+ * Walk @encoders's DRM device's mode_config and see if it's in use.
+ *
+ * RETURNS:
+ * True if @encoder is part of the mode_config, false otherwise.
+ */
+bool drm_helper_encoder_in_use(struct drm_encoder *encoder)
+{
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder)
+			return true;
+	return false;
+}
+EXPORT_SYMBOL(drm_helper_encoder_in_use);
+
+/**
  * drm_helper_crtc_in_use - check if a given CRTC is in a mode_config
  * @crtc: CRTC to check
  *
@@ -216,7 +239,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
 	struct drm_device *dev = crtc->dev;
 	/* FIXME: Locking around list access? */
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
-		if (encoder->crtc == crtc)
+		if (encoder->crtc == crtc && drm_helper_encoder_in_use(encoder))
 			return true;
 	return false;
 }
@@ -240,7 +263,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
 
 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 		encoder_funcs = encoder->helper_private;
-		if (!encoder->crtc)
+		if (!drm_helper_encoder_in_use(encoder))
 			(*encoder_funcs->dpms)(encoder, DRM_MODE_DPMS_OFF);
 	}
 
@@ -935,6 +958,88 @@ bool drm_helper_initial_config(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_helper_initial_config);
 
+static int drm_helper_choose_encoder_dpms(struct drm_encoder *encoder)
+{
+	int dpms = DRM_MODE_DPMS_OFF;
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder)
+			if (connector->dpms < dpms)
+				dpms = connector->dpms;
+	return dpms;
+}
+
+static int drm_helper_choose_crtc_dpms(struct drm_crtc *crtc)
+{
+	int dpms = DRM_MODE_DPMS_OFF;
+	struct drm_connector *connector;
+	struct drm_device *dev = crtc->dev;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder && connector->encoder->crtc == crtc)
+			if (connector->dpms < dpms)
+				dpms = connector->dpms;
+	return dpms;
+}
+
+/**
+ * drm_helper_connector_dpms
+ * @connector affected connector
+ * @mode DPMS mode
+ *
+ * Calls the low-level connector DPMS function, then
+ * calls appropriate encoder and crtc DPMS functions as well
+ */
+void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
+{
+	struct drm_encoder *encoder = connector->encoder;
+	struct drm_crtc *crtc = encoder ? encoder->crtc : NULL;
+	int old_dpms;
+
+	if (mode == connector->dpms)
+		return;
+
+	old_dpms = connector->dpms;
+	connector->dpms = mode;
+
+	/* from off to on, do crtc then encoder */
+	if (mode < old_dpms) {
+		if (crtc) {
+			struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+			if (crtc_funcs->dpms)
+				(*crtc_funcs->dpms) (crtc,
+						     drm_helper_choose_crtc_dpms(crtc));
+		}
+		if (encoder) {
+			struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+			if (encoder_funcs->dpms)
+				(*encoder_funcs->dpms) (encoder,
+							drm_helper_choose_encoder_dpms(encoder));
+		}
+	}
+
+	/* from on to off, do encoder then crtc */
+	if (mode > old_dpms) {
+		if (encoder) {
+			struct drm_encoder_helper_funcs *encoder_funcs = encoder->helper_private;
+			if (encoder_funcs->dpms)
+				(*encoder_funcs->dpms) (encoder,
+							drm_helper_choose_encoder_dpms(encoder));
+		}
+		if (crtc) {
+			struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
+			if (crtc_funcs->dpms)
+				(*crtc_funcs->dpms) (crtc,
+						     drm_helper_choose_crtc_dpms(crtc));
+		}
+	}
+
+	return;
+}
+EXPORT_SYMBOL(drm_helper_connector_dpms);
+
 /**
  * drm_hotplug_stage_two
  * @dev DRM device
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index f01def16a669..019b7c578236 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -481,7 +481,7 @@ int drm_ioctl(struct inode *inode, struct file *filp,
 	}
 	retcode = func(dev, kdata, file_priv);
 
-	if ((retcode == 0) && (cmd & IOC_OUT)) {
+	if (cmd & IOC_OUT) {
 		if (copy_to_user((void __user *)arg, kdata,
 				 _IOC_SIZE(cmd)) != 0)
 			retcode = -EFAULT;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index ca9c61656714..801a0d0e0810 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -289,6 +289,11 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
 	struct drm_display_mode *mode;
 	struct detailed_pixel_timing *pt = &timing->data.pixel_data;
 
+	/* ignore tiny modes */
+	if (((pt->hactive_hi << 8) | pt->hactive_lo) < 64 ||
+	    ((pt->vactive_hi << 8) | pt->hactive_lo) < 64)
+		return NULL;
+
 	if (pt->stereo) {
 		printk(KERN_WARNING "stereo mode not supported\n");
 		return NULL;
@@ -584,85 +589,13 @@ int drm_do_probe_ddc_edid(struct i2c_adapter *adapter,
 }
 EXPORT_SYMBOL(drm_do_probe_ddc_edid);
 
-/**
- * Get EDID information.
- *
- * \param adapter : i2c device adaptor.
- * \param buf : EDID data buffer to be filled
- * \param len : EDID data buffer length
- * \return 0 on success or -1 on failure.
- *
- * Initialize DDC, then fetch EDID information
- * by calling drm_do_probe_ddc_edid function.
- */
-static int drm_ddc_read(struct i2c_adapter *adapter,
-			unsigned char *buf, int len)
-{
-	struct i2c_algo_bit_data *algo_data = adapter->algo_data;
-	int i, j;
-	int ret = -1;
-
-	algo_data->setscl(algo_data->data, 1);
-
-	for (i = 0; i < 1; i++) {
-		/* For some old monitors we need the
-		 * following process to initialize/stop DDC
-		 */
-		algo_data->setsda(algo_data->data, 1);
-		msleep(13);
-
-		algo_data->setscl(algo_data->data, 1);
-		for (j = 0; j < 5; j++) {
-			msleep(10);
-			if (algo_data->getscl(algo_data->data))
-				break;
-		}
-		if (j == 5)
-			continue;
-
-		algo_data->setsda(algo_data->data, 0);
-		msleep(15);
-		algo_data->setscl(algo_data->data, 0);
-		msleep(15);
-		algo_data->setsda(algo_data->data, 1);
-		msleep(15);
-
-		/* Do the real work */
-		ret = drm_do_probe_ddc_edid(adapter, buf, len);
-		algo_data->setsda(algo_data->data, 0);
-		algo_data->setscl(algo_data->data, 0);
-		msleep(15);
-
-		algo_data->setscl(algo_data->data, 1);
-		for (j = 0; j < 10; j++) {
-			msleep(10);
-			if (algo_data->getscl(algo_data->data))
-				break;
-		}
-
-		algo_data->setsda(algo_data->data, 1);
-		msleep(15);
-		algo_data->setscl(algo_data->data, 0);
-		algo_data->setsda(algo_data->data, 0);
-		if (ret == 0)
-			break;
-	}
-	/* Release the DDC lines when done or the Apple Cinema HD display
-	 * will switch off
-	 */
-	algo_data->setsda(algo_data->data, 1);
-	algo_data->setscl(algo_data->data, 1);
-
-	return ret;
-}
-
 static int drm_ddc_read_edid(struct drm_connector *connector,
 			     struct i2c_adapter *adapter,
 			     char *buf, int len)
 {
 	int ret;
 
-	ret = drm_ddc_read(adapter, buf, len);
+	ret = drm_do_probe_ddc_edid(adapter, buf, len);
 	if (ret != 0) {
 		dev_info(&connector->dev->pdev->dev, "%s: no EDID data\n",
 			 drm_get_connector_name(connector));
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index 4984aa89cf3d..ec43005100d9 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -133,7 +133,7 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 
 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
-	obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
+	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
 
 	obj->dev = dev;
 	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
diff --git a/drivers/gpu/drm/drm_hashtab.c b/drivers/gpu/drm/drm_hashtab.c
index af539f7d87dd..ac35145c3e20 100644
--- a/drivers/gpu/drm/drm_hashtab.c
+++ b/drivers/gpu/drm/drm_hashtab.c
@@ -62,6 +62,7 @@ int drm_ht_create(struct drm_open_hash *ht, unsigned int order)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_create);
 
 void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key)
 {
@@ -156,6 +157,7 @@ int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *it
 	}
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_just_insert_please);
 
 int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
 		     struct drm_hash_item **item)
@@ -169,6 +171,7 @@ int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key,
 	*item = hlist_entry(list, struct drm_hash_item, head);
 	return 0;
 }
+EXPORT_SYMBOL(drm_ht_find_item);
 
 int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key)
 {
@@ -202,3 +205,4 @@ void drm_ht_remove(struct drm_open_hash *ht)
 		ht->table = NULL;
 	}
 }
+EXPORT_SYMBOL(drm_ht_remove);
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 93e677a481f5..fc8e5acd9d9a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -196,6 +196,7 @@ int drm_irq_install(struct drm_device *dev)
 {
 	int ret = 0;
 	unsigned long sh_flags = 0;
+	char *irqname;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
@@ -227,8 +228,13 @@ int drm_irq_install(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
 		sh_flags = IRQF_SHARED;
 
+	if (dev->devname)
+		irqname = dev->devname;
+	else
+		irqname = dev->driver->name;
+
 	ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
-			  sh_flags, dev->devname, dev);
+			  sh_flags, irqname, dev);
 
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 367c590ffbba..7819fd930a51 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -42,8 +42,11 @@
  */
 
 #include "drmP.h"
+#include "drm_mm.h"
 #include <linux/slab.h>
 
+#define MM_UNUSED_TARGET 4
+
 unsigned long drm_mm_tail_space(struct drm_mm *mm)
 {
 	struct list_head *tail_node;
@@ -74,16 +77,62 @@ int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size)
 	return 0;
 }
 
+static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
+{
+	struct drm_mm_node *child;
+
+	if (atomic)
+		child = kmalloc(sizeof(*child), GFP_ATOMIC);
+	else
+		child = kmalloc(sizeof(*child), GFP_KERNEL);
+
+	if (unlikely(child == NULL)) {
+		spin_lock(&mm->unused_lock);
+		if (list_empty(&mm->unused_nodes))
+			child = NULL;
+		else {
+			child =
+			    list_entry(mm->unused_nodes.next,
+				       struct drm_mm_node, fl_entry);
+			list_del(&child->fl_entry);
+			--mm->num_unused;
+		}
+		spin_unlock(&mm->unused_lock);
+	}
+	return child;
+}
+
+int drm_mm_pre_get(struct drm_mm *mm)
+{
+	struct drm_mm_node *node;
+
+	spin_lock(&mm->unused_lock);
+	while (mm->num_unused < MM_UNUSED_TARGET) {
+		spin_unlock(&mm->unused_lock);
+		node = kmalloc(sizeof(*node), GFP_KERNEL);
+		spin_lock(&mm->unused_lock);
+
+		if (unlikely(node == NULL)) {
+			int ret = (mm->num_unused < 2) ? -ENOMEM : 0;
+			spin_unlock(&mm->unused_lock);
+			return ret;
+		}
+		++mm->num_unused;
+		list_add_tail(&node->fl_entry, &mm->unused_nodes);
+	}
+	spin_unlock(&mm->unused_lock);
+	return 0;
+}
+EXPORT_SYMBOL(drm_mm_pre_get);
 
 static int drm_mm_create_tail_node(struct drm_mm *mm,
 				   unsigned long start,
-				   unsigned long size)
+				   unsigned long size, int atomic)
 {
 	struct drm_mm_node *child;
 
-	child = (struct drm_mm_node *)
-	    drm_alloc(sizeof(*child), DRM_MEM_MM);
-	if (!child)
+	child = drm_mm_kmalloc(mm, atomic);
+	if (unlikely(child == NULL))
 		return -ENOMEM;
 
 	child->free = 1;
@@ -97,8 +146,7 @@ static int drm_mm_create_tail_node(struct drm_mm *mm,
 	return 0;
 }
 
-
-int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
+int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size, int atomic)
 {
 	struct list_head *tail_node;
 	struct drm_mm_node *entry;
@@ -106,20 +154,21 @@ int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size)
 	tail_node = mm->ml_entry.prev;
 	entry = list_entry(tail_node, struct drm_mm_node, ml_entry);
 	if (!entry->free) {
-		return drm_mm_create_tail_node(mm, entry->start + entry->size, size);
+		return drm_mm_create_tail_node(mm, entry->start + entry->size,
+					       size, atomic);
 	}
 	entry->size += size;
 	return 0;
 }
 
 static struct drm_mm_node *drm_mm_split_at_start(struct drm_mm_node *parent,
-						 unsigned long size)
+						 unsigned long size,
+						 int atomic)
 {
 	struct drm_mm_node *child;
 
-	child = (struct drm_mm_node *)
-	    drm_alloc(sizeof(*child), DRM_MEM_MM);
-	if (!child)
+	child = drm_mm_kmalloc(parent->mm, atomic);
+	if (unlikely(child == NULL))
 		return NULL;
 
 	INIT_LIST_HEAD(&child->fl_entry);
@@ -151,8 +200,9 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 	tmp = parent->start % alignment;
 
 	if (tmp) {
-		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
-		if (!align_splitoff)
+		align_splitoff =
+		    drm_mm_split_at_start(parent, alignment - tmp, 0);
+		if (unlikely(align_splitoff == NULL))
 			return NULL;
 	}
 
@@ -161,7 +211,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 		parent->free = 0;
 		return parent;
 	} else {
-		child = drm_mm_split_at_start(parent, size);
+		child = drm_mm_split_at_start(parent, size, 0);
 	}
 
 	if (align_splitoff)
@@ -169,14 +219,49 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 
 	return child;
 }
+
 EXPORT_SYMBOL(drm_mm_get_block);
 
+struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+					    unsigned long size,
+					    unsigned alignment)
+{
+
+	struct drm_mm_node *align_splitoff = NULL;
+	struct drm_mm_node *child;
+	unsigned tmp = 0;
+
+	if (alignment)
+		tmp = parent->start % alignment;
+
+	if (tmp) {
+		align_splitoff =
+		    drm_mm_split_at_start(parent, alignment - tmp, 1);
+		if (unlikely(align_splitoff == NULL))
+			return NULL;
+	}
+
+	if (parent->size == size) {
+		list_del_init(&parent->fl_entry);
+		parent->free = 0;
+		return parent;
+	} else {
+		child = drm_mm_split_at_start(parent, size, 1);
+	}
+
+	if (align_splitoff)
+		drm_mm_put_block(align_splitoff);
+
+	return child;
+}
+EXPORT_SYMBOL(drm_mm_get_block_atomic);
+
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
  * Otherwise add to the free stack.
  */
 
-void drm_mm_put_block(struct drm_mm_node * cur)
+void drm_mm_put_block(struct drm_mm_node *cur)
 {
 
 	struct drm_mm *mm = cur->mm;
@@ -188,21 +273,27 @@ void drm_mm_put_block(struct drm_mm_node * cur)
 	int merged = 0;
 
 	if (cur_head->prev != root_head) {
-		prev_node = list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
+		prev_node =
+		    list_entry(cur_head->prev, struct drm_mm_node, ml_entry);
 		if (prev_node->free) {
 			prev_node->size += cur->size;
 			merged = 1;
 		}
 	}
 	if (cur_head->next != root_head) {
-		next_node = list_entry(cur_head->next, struct drm_mm_node, ml_entry);
+		next_node =
+		    list_entry(cur_head->next, struct drm_mm_node, ml_entry);
 		if (next_node->free) {
 			if (merged) {
 				prev_node->size += next_node->size;
 				list_del(&next_node->ml_entry);
 				list_del(&next_node->fl_entry);
-				drm_free(next_node, sizeof(*next_node),
-					 DRM_MEM_MM);
+				if (mm->num_unused < MM_UNUSED_TARGET) {
+					list_add(&next_node->fl_entry,
+						 &mm->unused_nodes);
+					++mm->num_unused;
+				} else
+					kfree(next_node);
 			} else {
 				next_node->size += cur->size;
 				next_node->start = cur->start;
@@ -215,14 +306,19 @@ void drm_mm_put_block(struct drm_mm_node * cur)
 		list_add(&cur->fl_entry, &mm->fl_entry);
 	} else {
 		list_del(&cur->ml_entry);
-		drm_free(cur, sizeof(*cur), DRM_MEM_MM);
+		if (mm->num_unused < MM_UNUSED_TARGET) {
+			list_add(&cur->fl_entry, &mm->unused_nodes);
+			++mm->num_unused;
+		} else
+			kfree(cur);
 	}
 }
+
 EXPORT_SYMBOL(drm_mm_put_block);
 
-struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
+struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 				       unsigned long size,
 				       unsigned alignment, int best_match)
 {
 	struct list_head *list;
 	const struct list_head *free_stack = &mm->fl_entry;
@@ -247,7 +343,6 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 			wasted += alignment - tmp;
 		}
 
-
 		if (entry->size >= size + wasted) {
 			if (!best_match)
 				return entry;
@@ -260,6 +355,7 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 
 	return best;
 }
+EXPORT_SYMBOL(drm_mm_search_free);
 
 int drm_mm_clean(struct drm_mm * mm)
 {
@@ -267,14 +363,17 @@ int drm_mm_clean(struct drm_mm * mm)
 
 	return (head->next->next == head);
 }
-EXPORT_SYMBOL(drm_mm_search_free);
+EXPORT_SYMBOL(drm_mm_clean);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
 	INIT_LIST_HEAD(&mm->ml_entry);
 	INIT_LIST_HEAD(&mm->fl_entry);
+	INIT_LIST_HEAD(&mm->unused_nodes);
+	mm->num_unused = 0;
+	spin_lock_init(&mm->unused_lock);
 
-	return drm_mm_create_tail_node(mm, start, size);
+	return drm_mm_create_tail_node(mm, start, size, 0);
 }
 EXPORT_SYMBOL(drm_mm_init);
 
@@ -282,6 +381,7 @@ void drm_mm_takedown(struct drm_mm * mm)
 {
 	struct list_head *bnode = mm->fl_entry.next;
 	struct drm_mm_node *entry;
+	struct drm_mm_node *next;
 
 	entry = list_entry(bnode, struct drm_mm_node, fl_entry);
 
@@ -293,7 +393,16 @@ void drm_mm_takedown(struct drm_mm * mm)
 
 	list_del(&entry->fl_entry);
 	list_del(&entry->ml_entry);
+	kfree(entry);
+
+	spin_lock(&mm->unused_lock);
+	list_for_each_entry_safe(entry, next, &mm->unused_nodes, fl_entry) {
+		list_del(&entry->fl_entry);
+		kfree(entry);
+		--mm->num_unused;
+	}
+	spin_unlock(&mm->unused_lock);
 
-	drm_free(entry, sizeof(*entry), DRM_MEM_MM);
+	BUG_ON(mm->num_unused != 0);
 }
 EXPORT_SYMBOL(drm_mm_takedown);
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index c9b80fdd4630..54f492a488a9 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -38,6 +38,7 @@
 #include "drm.h"
 #include "drm_crtc.h"
 
+#define DRM_MODESET_DEBUG	"drm_mode"
 /**
  * drm_mode_debug_printmodeline - debug print a mode
  * @dev: DRM device
@@ -50,12 +51,13 @@
  */
 void drm_mode_debug_printmodeline(struct drm_display_mode *mode)
 {
-	DRM_DEBUG("Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
-		mode->base.id, mode->name, mode->vrefresh, mode->clock,
-		mode->hdisplay, mode->hsync_start,
-		mode->hsync_end, mode->htotal,
-		mode->vdisplay, mode->vsync_start,
-		mode->vsync_end, mode->vtotal, mode->type, mode->flags);
+	DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
+		"Modeline %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x\n",
+		mode->base.id, mode->name, mode->vrefresh, mode->clock,
+		mode->hdisplay, mode->hsync_start,
+		mode->hsync_end, mode->htotal,
+		mode->vdisplay, mode->vsync_start,
+		mode->vsync_end, mode->vtotal, mode->type, mode->flags);
 }
 EXPORT_SYMBOL(drm_mode_debug_printmodeline);
 
@@ -401,7 +403,9 @@ void drm_mode_prune_invalid(struct drm_device *dev,
 			list_del(&mode->head);
 			if (verbose) {
 				drm_mode_debug_printmodeline(mode);
-				DRM_DEBUG("Not using %s mode %d\n", mode->name, mode->status);
+				DRM_DEBUG_MODE(DRM_MODESET_DEBUG,
+					"Not using %s mode %d\n",
+					mode->name, mode->status);
 			}
 			drm_mode_destroy(dev, mode);
 		}
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index b9631e3a1ea6..89050684fe0d 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -51,7 +51,22 @@ struct idr drm_minors_idr;
 struct class *drm_class;
 struct proc_dir_entry *drm_proc_root;
 struct dentry *drm_debugfs_root;
-
+void drm_ut_debug_printk(unsigned int request_level,
+			 const char *prefix,
+			 const char *function_name,
+			 const char *format, ...)
+{
+	va_list args;
+
+	if (drm_debug & request_level) {
+		if (function_name)
+			printk(KERN_DEBUG "[%s:%s], ", prefix, function_name);
+		va_start(args, format);
+		vprintk(format, args);
+		va_end(args);
+	}
+}
+EXPORT_SYMBOL(drm_ut_debug_printk);
 static int drm_minor_get_id(struct drm_device *dev, int type)
 {
 	int new_id;
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index 8f9372921f82..9987ab880835 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -147,7 +147,7 @@ static ssize_t status_show(struct device *device,
 	enum drm_connector_status status;
 
 	status = connector->funcs->detect(connector);
-	return snprintf(buf, PAGE_SIZE, "%s",
+	return snprintf(buf, PAGE_SIZE, "%s\n",
 			drm_get_connector_status_name(status));
 }
 
@@ -166,7 +166,7 @@ static ssize_t dpms_show(struct device *device,
 	if (ret)
 		return 0;
 
-	return snprintf(buf, PAGE_SIZE, "%s",
+	return snprintf(buf, PAGE_SIZE, "%s\n",
 			drm_get_dpms_name((int)dpms_status));
 }
 
@@ -176,7 +176,7 @@ static ssize_t enabled_show(struct device *device,
 {
 	struct drm_connector *connector = to_drm_connector(device);
 
-	return snprintf(buf, PAGE_SIZE, connector->encoder ? "enabled" :
+	return snprintf(buf, PAGE_SIZE, "%s\n", connector->encoder ? "enabled" :
 			"disabled");
 }
 
@@ -317,6 +317,7 @@ static struct device_attribute connector_attrs_opt1[] = {
 
 static struct bin_attribute edid_attr = {
 	.attr.name = "edid",
+	.attr.mode = 0444,
 	.size = 128,
 	.read = edid_show,
 };
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 53d544552625..1a60626f6803 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -33,6 +33,8 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
+#define I915_DRV	"i915_drv"
+
 /* Really want an OS-independent resettable timer. Would like to have
  * this loop run for (eg) 3 sec, but have the timer reset every time
  * the head pointer changes, so that EBUSY only happens if the ring
@@ -99,7 +101,7 @@ static int i915_init_phys_hws(struct drm_device *dev)
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 
 	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
-	DRM_DEBUG("Enabled hardware status page\n");
+	DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
 	return 0;
 }
 
@@ -185,7 +187,8 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 		master_priv->sarea_priv = (drm_i915_sarea_t *)
 			((u8 *)master_priv->sarea->handle + init->sarea_priv_offset);
 	} else {
-		DRM_DEBUG("sarea not found assuming DRI2 userspace\n");
+		DRM_DEBUG_DRIVER(I915_DRV,
+				"sarea not found assuming DRI2 userspace\n");
 	}
 
 	if (init->ring_size != 0) {
@@ -235,7 +238,7 @@ static int i915_dma_resume(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-	DRM_DEBUG("%s\n", __func__);
+	DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
 
 	if (dev_priv->ring.map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
@@ -248,13 +251,14 @@ static int i915_dma_resume(struct drm_device * dev)
 		DRM_ERROR("Can not find hardware status page\n");
 		return -EINVAL;
 	}
-	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
+	DRM_DEBUG_DRIVER(I915_DRV, "hw status page @ %p\n",
+				dev_priv->hw_status_page);
 
 	if (dev_priv->status_gfx_addr != 0)
 		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	else
 		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
-	DRM_DEBUG("Enabled hardware status page\n");
+	DRM_DEBUG_DRIVER(I915_DRV, "Enabled hardware status page\n");
 
 	return 0;
 }
@@ -548,10 +552,10 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	if (!master_priv->sarea_priv)
 		return -EINVAL;
 
-	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
-		  __func__,
-		  dev_priv->current_page,
-		  master_priv->sarea_priv->pf_current_page);
+	DRM_DEBUG_DRIVER(I915_DRV, "%s: page=%d pfCurrentPage=%d\n",
+			__func__,
+			dev_priv->current_page,
+			master_priv->sarea_priv->pf_current_page);
 
 	i915_kernel_lost_context(dev);
 
@@ -629,8 +633,9 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
-		  batch->start, batch->used, batch->num_cliprects);
+	DRM_DEBUG_DRIVER(I915_DRV,
+			"i915 batchbuffer, start %x used %d cliprects %d\n",
+			batch->start, batch->used, batch->num_cliprects);
 
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
@@ -678,8 +683,9 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	void *batch_data;
 	int ret;
 
-	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
-		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
+	DRM_DEBUG_DRIVER(I915_DRV,
+			"i915 cmdbuffer, buf %p sz %d cliprects %d\n",
+			cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
 
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
@@ -734,7 +740,7 @@ static int i915_flip_bufs(struct drm_device *dev, void *data,
 {
 	int ret;
 
-	DRM_DEBUG("%s\n", __func__);
+	DRM_DEBUG_DRIVER(I915_DRV, "%s\n", __func__);
 
 	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
@@ -777,7 +783,8 @@ static int i915_getparam(struct drm_device *dev, void *data,
 		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
 		break;
 	default:
-		DRM_DEBUG("Unknown parameter %d\n", param->param);
+		DRM_DEBUG_DRIVER(I915_DRV, "Unknown parameter %d\n",
+					param->param);
 		return -EINVAL;
 	}
 
@@ -817,7 +824,8 @@ static int i915_setparam(struct drm_device *dev, void *data,
 		dev_priv->fence_reg_start = param->value;
 		break;
 	default:
-		DRM_DEBUG("unknown parameter %d\n", param->param);
+		DRM_DEBUG_DRIVER(I915_DRV, "unknown parameter %d\n",
+					param->param);
 		return -EINVAL;
 	}
 
@@ -865,9 +873,10 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
-	DRM_DEBUG("load hws HWS_PGA with gfx mem 0x%x\n",
-			dev_priv->status_gfx_addr);
-	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
+	DRM_DEBUG_DRIVER(I915_DRV, "load hws HWS_PGA with gfx mem 0x%x\n",
+				dev_priv->status_gfx_addr);
+	DRM_DEBUG_DRIVER(I915_DRV, "load hws at %p\n",
+				dev_priv->hw_status_page);
 	return 0;
 }
 
@@ -922,7 +931,7 @@ static int i915_probe_agp(struct drm_device *dev, unsigned long *aperture_size,
 	 * Some of the preallocated space is taken by the GTT
 	 * and popup. GTT is 1K per MB of aperture size, and popup is 4K.
 	 */
-	if (IS_G4X(dev) || IS_IGD(dev))
+	if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
 		overhead = 4096;
 	else
 		overhead = (*aperture_size / 1024) + 4096;
@@ -987,12 +996,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 	int fb_bar = IS_I9XX(dev) ? 2 : 0;
 	int ret = 0;
 
-	dev->devname = kstrdup(DRIVER_NAME, GFP_KERNEL);
-	if (!dev->devname) {
-		ret = -ENOMEM;
-		goto out;
-	}
-
 	dev->mode_config.fb_base = drm_get_resource_start(dev, fb_bar) &
 		0xff000000;
 
@@ -1006,7 +1009,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	ret = i915_probe_agp(dev, &agp_size, &prealloc_size);
 	if (ret)
-		goto kfree_devname;
+		goto out;
 
 	/* Basic memrange allocator for stolen space (aka vram) */
 	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
@@ -1024,7 +1027,7 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 	ret = i915_gem_init_ringbuffer(dev);
 	if (ret)
-		goto kfree_devname;
+		goto out;
 
 	/* Allow hardware batchbuffers unless told otherwise.
 	 */
@@ -1056,8 +1059,6 @@ static int i915_load_modeset_init(struct drm_device *dev)
 
 destroy_ringbuffer:
 	i915_gem_cleanup_ringbuffer(dev);
-kfree_devname:
-	kfree(dev->devname);
 out:
 	return ret;
 }
@@ -1161,8 +1162,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 #endif
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
-	if (IS_GM45(dev))
+	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+	if (IS_G4X(dev) || IS_IGDNG(dev)) {
+		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
+	}
 
 	i915_gem_load(dev);
 
@@ -1206,7 +1210,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	/* Must be done after probing outputs */
-	intel_opregion_init(dev, 0);
+	/* FIXME: verify on IGDNG */
+	if (!IS_IGDNG(dev))
+		intel_opregion_init(dev, 0);
 
 	return 0;
 
@@ -1240,7 +1246,8 @@ int i915_driver_unload(struct drm_device *dev)
 	if (dev_priv->regs != NULL)
 		iounmap(dev_priv->regs);
 
-	intel_opregion_free(dev, 0);
+	if (!IS_IGDNG(dev))
+		intel_opregion_free(dev, 0);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_modeset_cleanup(dev);
@@ -1264,7 +1271,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 {
 	struct drm_i915_file_private *i915_file_priv;
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_DRIVER(I915_DRV, "\n");
 	i915_file_priv = (struct drm_i915_file_private *)
 	    drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
 
@@ -1273,8 +1280,7 @@ int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
 
 	file_priv->driver_priv = i915_file_priv;
 
-	i915_file_priv->mm.last_gem_seqno = 0;
-	i915_file_priv->mm.last_gem_throttle_seqno = 0;
+	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
 
 	return 0;
 }
@@ -1311,6 +1317,7 @@ void i915_driver_lastclose(struct drm_device * dev)
 void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	i915_gem_release(dev, file_priv);
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 }
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 9b149fe824c3..8ef6bcec211b 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -126,6 +126,13 @@ struct drm_i915_fence_reg {
 	struct drm_gem_object *obj;
 };
 
+struct sdvo_device_mapping {
+	u8 dvo_port;
+	u8 slave_addr;
+	u8 dvo_wiring;
+	u8 initialized;
+};
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
@@ -143,6 +150,8 @@ typedef struct drm_i915_private {
 	drm_local_map_t hws_map;
 	struct drm_gem_object *hws_obj;
 
+	struct resource mch_res;
+
 	unsigned int cpp;
 	int back_offset;
 	int front_offset;
@@ -158,6 +167,11 @@ typedef struct drm_i915_private {
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask_reg;
 	u32 pipestat[2];
+	/** splitted irq regs for graphics and display engine on IGDNG,
+	    irq_mask_reg is still used for display irq. */
+	u32 gt_irq_mask_reg;
+	u32 gt_irq_enable_reg;
+	u32 de_irq_enable_reg;
 
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
@@ -180,7 +194,8 @@ typedef struct drm_i915_private {
 	int backlight_duty_cycle; /* restore backlight to this value */
 	bool panel_wants_dither;
 	struct drm_display_mode *panel_fixed_mode;
-	struct drm_display_mode *vbt_mode; /* if any */
+	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
+	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 
 	/* Feature bits from the VBIOS */
 	unsigned int int_tv_support:1;
@@ -284,6 +299,13 @@ typedef struct drm_i915_private {
 	u8 saveDACMASK;
 	u8 saveCR[37];
 	uint64_t saveFENCE[16];
+	u32 saveCURACNTR;
+	u32 saveCURAPOS;
+	u32 saveCURABASE;
+	u32 saveCURBCNTR;
+	u32 saveCURBPOS;
+	u32 saveCURBBASE;
+	u32 saveCURSIZE;
 
 	struct {
 		struct drm_mm gtt_space;
@@ -381,6 +403,7 @@ typedef struct drm_i915_private {
 	/* storage for physical objects */
 	struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 	} mm;
+	struct sdvo_device_mapping sdvo_mappings[2];
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -490,13 +513,16 @@ struct drm_i915_gem_request {
 	/** Time at which this request was emitted, in jiffies. */
 	unsigned long emitted_jiffies;
 
+	/** global list entry for this request */
 	struct list_head list;
+
+	/** file_priv list entry for this request */
+	struct list_head client_list;
 };
 
 struct drm_i915_file_private {
 	struct {
-		uint32_t last_gem_seqno;
-		uint32_t last_gem_throttle_seqno;
+		struct list_head request_list;
 	} mm;
 };
 
@@ -641,6 +667,7 @@ void i915_gem_detach_phys_object(struct drm_device *dev,
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 int i915_gem_object_get_pages(struct drm_gem_object *obj);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
+void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 
 /* i915_gem_tiling.c */
 void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
@@ -784,7 +811,9 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 		     (dev)->pci_device == 0x2E02 || \
 		     (dev)->pci_device == 0x2E12 || \
 		     (dev)->pci_device == 0x2E22 || \
-		     (dev)->pci_device == 0x2E32)
+		     (dev)->pci_device == 0x2E32 || \
+		     (dev)->pci_device == 0x0042 || \
+		     (dev)->pci_device == 0x0046)
 
 #define IS_I965GM(dev) ((dev)->pci_device == 0x2A02 || \
 			(dev)->pci_device == 0x2A12)
@@ -806,20 +835,26 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 		     (dev)->pci_device == 0x29D2 || \
 		     (IS_IGD(dev)))
 
+#define IS_IGDNG_D(dev)	((dev)->pci_device == 0x0042)
+#define IS_IGDNG_M(dev)	((dev)->pci_device == 0x0046)
+#define IS_IGDNG(dev)	(IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
+
 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
-		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev))
+		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
+		      IS_IGDNG(dev))
 
 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
 			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
-			IS_IGD(dev))
+			IS_IGD(dev) || IS_IGDNG_M(dev))
 
-#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev))
+#define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
+				IS_IGDNG(dev))
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 * rows, which changed the alignment requirements and fence programming.
 */
 #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
 						      IS_I915GM(dev)))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IGDNG(dev))
 #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_I965G(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index b189b49c7602..c0ae6bbbd9b5 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -349,7 +349,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -429,7 +429,7 @@ fail_put_user_pages:
 		SetPageDirty(user_pages[i]);
 		page_cache_release(user_pages[i]);
 	}
-	kfree(user_pages);
+	drm_free_large(user_pages);
 
 	return ret;
 }
@@ -649,7 +649,7 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -719,7 +719,7 @@ out_unlock:
 out_unpin_pages:
 	for (i = 0; i < pinned_pages; i++)
 		page_cache_release(user_pages[i]);
-	kfree(user_pages);
+	drm_free_large(user_pages);
 
 	return ret;
 }
@@ -824,7 +824,7 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
 	last_data_page = (data_ptr + args->size - 1) / PAGE_SIZE;
 	num_pages = last_data_page - first_data_page + 1;
 
-	user_pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
+	user_pages = drm_calloc_large(num_pages, sizeof(struct page *));
 	if (user_pages == NULL)
 		return -ENOMEM;
 
@@ -902,7 +902,7 @@ fail_unlock:
 fail_put_user_pages:
 	for (i = 0; i < pinned_pages; i++)
 		page_cache_release(user_pages[i]);
-	kfree(user_pages);
+	drm_free_large(user_pages);
 
 	return ret;
 }
@@ -989,10 +989,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
989 return -ENODEV; 989 return -ENODEV;
990 990
991 /* Only handle setting domains to types used by the CPU. */ 991 /* Only handle setting domains to types used by the CPU. */
992 if (write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 992 if (write_domain & I915_GEM_GPU_DOMAINS)
993 return -EINVAL; 993 return -EINVAL;
994 994
995 if (read_domains & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) 995 if (read_domains & I915_GEM_GPU_DOMAINS)
996 return -EINVAL; 996 return -EINVAL;
997 997
998 /* Having something in the write domain implies it's in the read 998 /* Having something in the write domain implies it's in the read
@@ -1145,7 +1145,14 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1145 mutex_unlock(&dev->struct_mutex); 1145 mutex_unlock(&dev->struct_mutex);
1146 return VM_FAULT_SIGBUS; 1146 return VM_FAULT_SIGBUS;
1147 } 1147 }
1148 list_add(&obj_priv->list, &dev_priv->mm.inactive_list); 1148
1149 ret = i915_gem_object_set_to_gtt_domain(obj, write);
1150 if (ret) {
1151 mutex_unlock(&dev->struct_mutex);
1152 return VM_FAULT_SIGBUS;
1153 }
1154
1155 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1149 } 1156 }
1150 1157
1151 /* Need a new fence register? */ 1158 /* Need a new fence register? */
@@ -1375,7 +1382,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
1375 mutex_unlock(&dev->struct_mutex); 1382 mutex_unlock(&dev->struct_mutex);
1376 return ret; 1383 return ret;
1377 } 1384 }
1378 list_add(&obj_priv->list, &dev_priv->mm.inactive_list); 1385 list_add_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
1379 } 1386 }
1380 1387
1381 drm_gem_object_unreference(obj); 1388 drm_gem_object_unreference(obj);
@@ -1408,9 +1415,7 @@ i915_gem_object_put_pages(struct drm_gem_object *obj)
1408 } 1415 }
1409 obj_priv->dirty = 0; 1416 obj_priv->dirty = 0;
1410 1417
1411 drm_free(obj_priv->pages, 1418 drm_free_large(obj_priv->pages);
1412 page_count * sizeof(struct page *),
1413 DRM_MEM_DRIVER);
1414 obj_priv->pages = NULL; 1419 obj_priv->pages = NULL;
1415} 1420}
1416 1421
@@ -1476,14 +1481,19 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
1476 * Returned sequence numbers are nonzero on success. 1481 * Returned sequence numbers are nonzero on success.
1477 */ 1482 */
1478static uint32_t 1483static uint32_t
1479i915_add_request(struct drm_device *dev, uint32_t flush_domains) 1484i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
1485 uint32_t flush_domains)
1480{ 1486{
1481 drm_i915_private_t *dev_priv = dev->dev_private; 1487 drm_i915_private_t *dev_priv = dev->dev_private;
1488 struct drm_i915_file_private *i915_file_priv = NULL;
1482 struct drm_i915_gem_request *request; 1489 struct drm_i915_gem_request *request;
1483 uint32_t seqno; 1490 uint32_t seqno;
1484 int was_empty; 1491 int was_empty;
1485 RING_LOCALS; 1492 RING_LOCALS;
1486 1493
1494 if (file_priv != NULL)
1495 i915_file_priv = file_priv->driver_priv;
1496
1487 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); 1497 request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
1488 if (request == NULL) 1498 if (request == NULL)
1489 return 0; 1499 return 0;
@@ -1510,6 +1520,12 @@ i915_add_request(struct drm_device *dev, uint32_t flush_domains)
1510 request->emitted_jiffies = jiffies; 1520 request->emitted_jiffies = jiffies;
1511 was_empty = list_empty(&dev_priv->mm.request_list); 1521 was_empty = list_empty(&dev_priv->mm.request_list);
1512 list_add_tail(&request->list, &dev_priv->mm.request_list); 1522 list_add_tail(&request->list, &dev_priv->mm.request_list);
1523 if (i915_file_priv) {
1524 list_add_tail(&request->client_list,
1525 &i915_file_priv->mm.request_list);
1526 } else {
1527 INIT_LIST_HEAD(&request->client_list);
1528 }
1513 1529
1514 /* Associate any objects on the flushing list matching the write 1530 /* Associate any objects on the flushing list matching the write
1515 * domain we're flushing with our flush. 1531 * domain we're flushing with our flush.
@@ -1659,6 +1675,7 @@ i915_gem_retire_requests(struct drm_device *dev)
1659 i915_gem_retire_request(dev, request); 1675 i915_gem_retire_request(dev, request);
1660 1676
1661 list_del(&request->list); 1677 list_del(&request->list);
1678 list_del(&request->client_list);
1662 drm_free(request, sizeof(*request), DRM_MEM_DRIVER); 1679 drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
1663 } else 1680 } else
1664 break; 1681 break;
@@ -1697,7 +1714,10 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
1697 BUG_ON(seqno == 0); 1714 BUG_ON(seqno == 0);
1698 1715
1699 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { 1716 if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
1700 ier = I915_READ(IER); 1717 if (IS_IGDNG(dev))
1718 ier = I915_READ(DEIER) | I915_READ(GTIER);
1719 else
1720 ier = I915_READ(IER);
1701 if (!ier) { 1721 if (!ier) {
1702 DRM_ERROR("something (likely vbetool) disabled " 1722 DRM_ERROR("something (likely vbetool) disabled "
1703 "interrupts, re-enabling\n"); 1723 "interrupts, re-enabling\n");
@@ -1749,8 +1769,7 @@ i915_gem_flush(struct drm_device *dev,
1749 if (flush_domains & I915_GEM_DOMAIN_CPU) 1769 if (flush_domains & I915_GEM_DOMAIN_CPU)
1750 drm_agp_chipset_flush(dev); 1770 drm_agp_chipset_flush(dev);
1751 1771
1752 if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU | 1772 if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
1753 I915_GEM_DOMAIN_GTT)) {
1754 /* 1773 /*
1755 * read/write caches: 1774 * read/write caches:
1756 * 1775 *
@@ -1972,7 +1991,7 @@ i915_gem_evict_something(struct drm_device *dev)
1972 i915_gem_flush(dev, 1991 i915_gem_flush(dev,
1973 obj->write_domain, 1992 obj->write_domain,
1974 obj->write_domain); 1993 obj->write_domain);
1975 i915_add_request(dev, obj->write_domain); 1994 i915_add_request(dev, NULL, obj->write_domain);
1976 1995
1977 obj = NULL; 1996 obj = NULL;
1978 continue; 1997 continue;
@@ -1986,7 +2005,7 @@ i915_gem_evict_something(struct drm_device *dev)
1986 /* If we didn't do any of the above, there's nothing to be done 2005 /* If we didn't do any of the above, there's nothing to be done
1987 * and we just can't fit it in. 2006 * and we just can't fit it in.
1988 */ 2007 */
1989 return -ENOMEM; 2008 return -ENOSPC;
1990 } 2009 }
1991 return ret; 2010 return ret;
1992} 2011}
@@ -2001,7 +2020,7 @@ i915_gem_evict_everything(struct drm_device *dev)
2001 if (ret != 0) 2020 if (ret != 0)
2002 break; 2021 break;
2003 } 2022 }
2004 if (ret == -ENOMEM) 2023 if (ret == -ENOSPC)
2005 return 0; 2024 return 0;
2006 return ret; 2025 return ret;
2007} 2026}
@@ -2024,8 +2043,7 @@ i915_gem_object_get_pages(struct drm_gem_object *obj)
2024 */ 2043 */
2025 page_count = obj->size / PAGE_SIZE; 2044 page_count = obj->size / PAGE_SIZE;
2026 BUG_ON(obj_priv->pages != NULL); 2045 BUG_ON(obj_priv->pages != NULL);
2027 obj_priv->pages = drm_calloc(page_count, sizeof(struct page *), 2046 obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
2028 DRM_MEM_DRIVER);
2029 if (obj_priv->pages == NULL) { 2047 if (obj_priv->pages == NULL) {
2030 DRM_ERROR("Faled to allocate page list\n"); 2048 DRM_ERROR("Faled to allocate page list\n");
2031 obj_priv->pages_refcount--; 2049 obj_priv->pages_refcount--;
@@ -2131,8 +2149,10 @@ static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
2131 return; 2149 return;
2132 } 2150 }
2133 2151
2134 pitch_val = (obj_priv->stride / 128) - 1; 2152 pitch_val = obj_priv->stride / 128;
2135 WARN_ON(pitch_val & ~0x0000000f); 2153 pitch_val = ffs(pitch_val) - 1;
2154 WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
2155
2136 val = obj_priv->gtt_offset; 2156 val = obj_priv->gtt_offset;
2137 if (obj_priv->tiling_mode == I915_TILING_Y) 2157 if (obj_priv->tiling_mode == I915_TILING_Y)
2138 val |= 1 << I830_FENCE_TILING_Y_SHIFT; 2158 val |= 1 << I830_FENCE_TILING_Y_SHIFT;
@@ -2209,7 +2229,7 @@ try_again:
2209 loff_t offset; 2229 loff_t offset;
2210 2230
2211 if (avail == 0) 2231 if (avail == 0)
2212 return -ENOMEM; 2232 return -ENOSPC;
2213 2233
2214 for (i = dev_priv->fence_reg_start; 2234 for (i = dev_priv->fence_reg_start;
2215 i < dev_priv->num_fence_regs; i++) { 2235 i < dev_priv->num_fence_regs; i++) {
@@ -2242,7 +2262,7 @@ try_again:
2242 i915_gem_flush(dev, 2262 i915_gem_flush(dev,
2243 I915_GEM_GPU_DOMAINS, 2263 I915_GEM_GPU_DOMAINS,
2244 I915_GEM_GPU_DOMAINS); 2264 I915_GEM_GPU_DOMAINS);
2245 seqno = i915_add_request(dev, 2265 seqno = i915_add_request(dev, NULL,
2246 I915_GEM_GPU_DOMAINS); 2266 I915_GEM_GPU_DOMAINS);
2247 if (seqno == 0) 2267 if (seqno == 0)
2248 return -ENOMEM; 2268 return -ENOMEM;
@@ -2254,9 +2274,6 @@ try_again:
2254 goto try_again; 2274 goto try_again;
2255 } 2275 }
2256 2276
2257 BUG_ON(old_obj_priv->active ||
2258 (reg->obj->write_domain & I915_GEM_GPU_DOMAINS));
2259
2260 /* 2277 /*
2261 * Zap this virtual mapping so we can set up a fence again 2278 * Zap this virtual mapping so we can set up a fence again
2262 * for this object next time we need it. 2279 * for this object next time we need it.
@@ -2361,7 +2378,7 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2361 spin_unlock(&dev_priv->mm.active_list_lock); 2378 spin_unlock(&dev_priv->mm.active_list_lock);
2362 if (lists_empty) { 2379 if (lists_empty) {
2363 DRM_ERROR("GTT full, but LRU list empty\n"); 2380 DRM_ERROR("GTT full, but LRU list empty\n");
2364 return -ENOMEM; 2381 return -ENOSPC;
2365 } 2382 }
2366 2383
2367 ret = i915_gem_evict_something(dev); 2384 ret = i915_gem_evict_something(dev);
@@ -2406,8 +2423,8 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
2406 * wasn't in the GTT, there shouldn't be any way it could have been in 2423 * wasn't in the GTT, there shouldn't be any way it could have been in
2407 * a GPU cache 2424 * a GPU cache
2408 */ 2425 */
2409 BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 2426 BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
2410 BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 2427 BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
2411 2428
2412 return 0; 2429 return 0;
2413} 2430}
@@ -2424,6 +2441,16 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
2424 if (obj_priv->pages == NULL) 2441 if (obj_priv->pages == NULL)
2425 return; 2442 return;
2426 2443
2444 /* XXX: The 865 in particular appears to be weird in how it handles
2445 * cache flushing. We haven't figured it out, but the
2446 * clflush+agp_chipset_flush doesn't appear to successfully get the
2447 * data visible to the GPU, while wbinvd + agp_chipset_flush does.
2448 */
2449 if (IS_I865G(obj->dev)) {
2450 wbinvd();
2451 return;
2452 }
2453
2427 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE); 2454 drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
2428} 2455}
2429 2456
@@ -2439,7 +2466,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
2439 2466
2440 /* Queue the GPU write cache flushing we need. */ 2467 /* Queue the GPU write cache flushing we need. */
2441 i915_gem_flush(dev, 0, obj->write_domain); 2468 i915_gem_flush(dev, 0, obj->write_domain);
2442 seqno = i915_add_request(dev, obj->write_domain); 2469 seqno = i915_add_request(dev, NULL, obj->write_domain);
2443 obj->write_domain = 0; 2470 obj->write_domain = 0;
2444 i915_gem_object_move_to_active(obj, seqno); 2471 i915_gem_object_move_to_active(obj, seqno);
2445} 2472}
@@ -3022,20 +3049,12 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
3022 drm_i915_private_t *dev_priv = dev->dev_private; 3049 drm_i915_private_t *dev_priv = dev->dev_private;
3023 int nbox = exec->num_cliprects; 3050 int nbox = exec->num_cliprects;
3024 int i = 0, count; 3051 int i = 0, count;
3025 uint32_t exec_start, exec_len; 3052 uint32_t exec_start, exec_len;
3026 RING_LOCALS; 3053 RING_LOCALS;
3027 3054
3028 exec_start = (uint32_t) exec_offset + exec->batch_start_offset; 3055 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3029 exec_len = (uint32_t) exec->batch_len; 3056 exec_len = (uint32_t) exec->batch_len;
3030 3057
3031 if ((exec_start | exec_len) & 0x7) {
3032 DRM_ERROR("alignment\n");
3033 return -EINVAL;
3034 }
3035
3036 if (!exec_start)
3037 return -EINVAL;
3038
3039 count = nbox ? nbox : 1; 3058 count = nbox ? nbox : 1;
3040 3059
3041 for (i = 0; i < count; i++) { 3060 for (i = 0; i < count; i++) {
@@ -3076,6 +3095,10 @@ i915_dispatch_gem_execbuffer(struct drm_device *dev,
3076/* Throttle our rendering by waiting until the ring has completed our requests 3095/* Throttle our rendering by waiting until the ring has completed our requests
3077 * emitted over 20 msec ago. 3096 * emitted over 20 msec ago.
3078 * 3097 *
3098 * Note that if we were to use the current jiffies each time around the loop,
3099 * we wouldn't escape the function with any frames outstanding if the time to
3100 * render a frame was over 20ms.
3101 *
3079 * This should get us reasonable parallelism between CPU and GPU but also 3102 * This should get us reasonable parallelism between CPU and GPU but also
3080 * relatively low latency when blocking on a particular request to finish. 3103 * relatively low latency when blocking on a particular request to finish.
3081 */ 3104 */
@@ -3084,15 +3107,25 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
3084{ 3107{
3085 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; 3108 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3086 int ret = 0; 3109 int ret = 0;
3087 uint32_t seqno; 3110 unsigned long recent_enough = jiffies - msecs_to_jiffies(20);
3088 3111
3089 mutex_lock(&dev->struct_mutex); 3112 mutex_lock(&dev->struct_mutex);
3090 seqno = i915_file_priv->mm.last_gem_throttle_seqno; 3113 while (!list_empty(&i915_file_priv->mm.request_list)) {
3091 i915_file_priv->mm.last_gem_throttle_seqno = 3114 struct drm_i915_gem_request *request;
3092 i915_file_priv->mm.last_gem_seqno; 3115
3093 if (seqno) 3116 request = list_first_entry(&i915_file_priv->mm.request_list,
3094 ret = i915_wait_request(dev, seqno); 3117 struct drm_i915_gem_request,
3118 client_list);
3119
3120 if (time_after_eq(request->emitted_jiffies, recent_enough))
3121 break;
3122
3123 ret = i915_wait_request(dev, request->seqno);
3124 if (ret != 0)
3125 break;
3126 }
3095 mutex_unlock(&dev->struct_mutex); 3127 mutex_unlock(&dev->struct_mutex);
3128
3096 return ret; 3129 return ret;
3097} 3130}
3098 3131
@@ -3111,7 +3144,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3111 reloc_count += exec_list[i].relocation_count; 3144 reloc_count += exec_list[i].relocation_count;
3112 } 3145 }
3113 3146
3114 *relocs = drm_calloc(reloc_count, sizeof(**relocs), DRM_MEM_DRIVER); 3147 *relocs = drm_calloc_large(reloc_count, sizeof(**relocs));
3115 if (*relocs == NULL) 3148 if (*relocs == NULL)
3116 return -ENOMEM; 3149 return -ENOMEM;
3117 3150
@@ -3125,8 +3158,7 @@ i915_gem_get_relocs_from_user(struct drm_i915_gem_exec_object *exec_list,
3125 exec_list[i].relocation_count * 3158 exec_list[i].relocation_count *
3126 sizeof(**relocs)); 3159 sizeof(**relocs));
3127 if (ret != 0) { 3160 if (ret != 0) {
3128 drm_free(*relocs, reloc_count * sizeof(**relocs), 3161 drm_free_large(*relocs);
3129 DRM_MEM_DRIVER);
3130 *relocs = NULL; 3162 *relocs = NULL;
3131 return -EFAULT; 3163 return -EFAULT;
3132 } 3164 }
@@ -3165,17 +3197,34 @@ i915_gem_put_relocs_to_user(struct drm_i915_gem_exec_object *exec_list,
3165 } 3197 }
3166 3198
3167err: 3199err:
3168 drm_free(relocs, reloc_count * sizeof(*relocs), DRM_MEM_DRIVER); 3200 drm_free_large(relocs);
3169 3201
3170 return ret; 3202 return ret;
3171} 3203}
3172 3204
3205static int
3206i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
3207 uint64_t exec_offset)
3208{
3209 uint32_t exec_start, exec_len;
3210
3211 exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
3212 exec_len = (uint32_t) exec->batch_len;
3213
3214 if ((exec_start | exec_len) & 0x7)
3215 return -EINVAL;
3216
3217 if (!exec_start)
3218 return -EINVAL;
3219
3220 return 0;
3221}
3222
3173int 3223int
3174i915_gem_execbuffer(struct drm_device *dev, void *data, 3224i915_gem_execbuffer(struct drm_device *dev, void *data,
3175 struct drm_file *file_priv) 3225 struct drm_file *file_priv)
3176{ 3226{
3177 drm_i915_private_t *dev_priv = dev->dev_private; 3227 drm_i915_private_t *dev_priv = dev->dev_private;
3178 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
3179 struct drm_i915_gem_execbuffer *args = data; 3228 struct drm_i915_gem_execbuffer *args = data;
3180 struct drm_i915_gem_exec_object *exec_list = NULL; 3229 struct drm_i915_gem_exec_object *exec_list = NULL;
3181 struct drm_gem_object **object_list = NULL; 3230 struct drm_gem_object **object_list = NULL;
@@ -3198,10 +3247,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3198 return -EINVAL; 3247 return -EINVAL;
3199 } 3248 }
3200 /* Copy in the exec list from userland */ 3249 /* Copy in the exec list from userland */
3201 exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count, 3250 exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
3202 DRM_MEM_DRIVER); 3251 object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
3203 object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
3204 DRM_MEM_DRIVER);
3205 if (exec_list == NULL || object_list == NULL) { 3252 if (exec_list == NULL || object_list == NULL) {
3206 DRM_ERROR("Failed to allocate exec or object list " 3253 DRM_ERROR("Failed to allocate exec or object list "
3207 "for %d buffers\n", 3254 "for %d buffers\n",
@@ -3302,7 +3349,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3302 break; 3349 break;
3303 3350
3304 /* error other than GTT full, or we've already tried again */ 3351 /* error other than GTT full, or we've already tried again */
3305 if (ret != -ENOMEM || pin_tries >= 1) { 3352 if (ret != -ENOSPC || pin_tries >= 1) {
3306 if (ret != -ERESTARTSYS) 3353 if (ret != -ERESTARTSYS)
3307 DRM_ERROR("Failed to pin buffers %d\n", ret); 3354 DRM_ERROR("Failed to pin buffers %d\n", ret);
3308 goto err; 3355 goto err;
@@ -3321,8 +3368,20 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3321 3368
3322 /* Set the pending read domains for the batch buffer to COMMAND */ 3369 /* Set the pending read domains for the batch buffer to COMMAND */
3323 batch_obj = object_list[args->buffer_count-1]; 3370 batch_obj = object_list[args->buffer_count-1];
3324 batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND; 3371 if (batch_obj->pending_write_domain) {
3325 batch_obj->pending_write_domain = 0; 3372 DRM_ERROR("Attempting to use self-modifying batch buffer\n");
3373 ret = -EINVAL;
3374 goto err;
3375 }
3376 batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
3377
3378 /* Sanity check the batch buffer, prior to moving objects */
3379 exec_offset = exec_list[args->buffer_count - 1].offset;
3380 ret = i915_gem_check_execbuffer (args, exec_offset);
3381 if (ret != 0) {
3382 DRM_ERROR("execbuf with invalid offset/length\n");
3383 goto err;
3384 }
3326 3385
3327 i915_verify_inactive(dev, __FILE__, __LINE__); 3386 i915_verify_inactive(dev, __FILE__, __LINE__);
3328 3387
@@ -3353,7 +3412,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3353 dev->invalidate_domains, 3412 dev->invalidate_domains,
3354 dev->flush_domains); 3413 dev->flush_domains);
3355 if (dev->flush_domains) 3414 if (dev->flush_domains)
3356 (void)i915_add_request(dev, dev->flush_domains); 3415 (void)i915_add_request(dev, file_priv,
3416 dev->flush_domains);
3357 } 3417 }
3358 3418
3359 for (i = 0; i < args->buffer_count; i++) { 3419 for (i = 0; i < args->buffer_count; i++) {
@@ -3371,8 +3431,6 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3371 } 3431 }
3372#endif 3432#endif
3373 3433
3374 exec_offset = exec_list[args->buffer_count - 1].offset;
3375
3376#if WATCH_EXEC 3434#if WATCH_EXEC
3377 i915_gem_dump_object(batch_obj, 3435 i915_gem_dump_object(batch_obj,
3378 args->batch_len, 3436 args->batch_len,
@@ -3402,9 +3460,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
3402 * *some* interrupts representing completion of buffers that we can 3460 * *some* interrupts representing completion of buffers that we can
3403 * wait on when trying to clear up gtt space). 3461 * wait on when trying to clear up gtt space).
3404 */ 3462 */
3405 seqno = i915_add_request(dev, flush_domains); 3463 seqno = i915_add_request(dev, file_priv, flush_domains);
3406 BUG_ON(seqno == 0); 3464 BUG_ON(seqno == 0);
3407 i915_file_priv->mm.last_gem_seqno = seqno;
3408 for (i = 0; i < args->buffer_count; i++) { 3465 for (i = 0; i < args->buffer_count; i++) {
3409 struct drm_gem_object *obj = object_list[i]; 3466 struct drm_gem_object *obj = object_list[i];
3410 3467
@@ -3462,10 +3519,8 @@ err:
3462 } 3519 }
3463 3520
3464pre_mutex_err: 3521pre_mutex_err:
3465 drm_free(object_list, sizeof(*object_list) * args->buffer_count, 3522 drm_free_large(object_list);
3466 DRM_MEM_DRIVER); 3523 drm_free_large(exec_list);
3467 drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
3468 DRM_MEM_DRIVER);
3469 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects, 3524 drm_free(cliprects, sizeof(*cliprects) * args->num_cliprects,
3470 DRM_MEM_DRIVER); 3525 DRM_MEM_DRIVER);
3471 3526
@@ -3512,8 +3567,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
3512 atomic_inc(&dev->pin_count); 3567 atomic_inc(&dev->pin_count);
3513 atomic_add(obj->size, &dev->pin_memory); 3568 atomic_add(obj->size, &dev->pin_memory);
3514 if (!obj_priv->active && 3569 if (!obj_priv->active &&
3515 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 3570 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
3516 I915_GEM_DOMAIN_GTT)) == 0 &&
3517 !list_empty(&obj_priv->list)) 3571 !list_empty(&obj_priv->list))
3518 list_del_init(&obj_priv->list); 3572 list_del_init(&obj_priv->list);
3519 } 3573 }
@@ -3540,8 +3594,7 @@ i915_gem_object_unpin(struct drm_gem_object *obj)
3540 */ 3594 */
3541 if (obj_priv->pin_count == 0) { 3595 if (obj_priv->pin_count == 0) {
3542 if (!obj_priv->active && 3596 if (!obj_priv->active &&
3543 (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | 3597 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
3544 I915_GEM_DOMAIN_GTT)) == 0)
3545 list_move_tail(&obj_priv->list, 3598 list_move_tail(&obj_priv->list,
3546 &dev_priv->mm.inactive_list); 3599 &dev_priv->mm.inactive_list);
3547 atomic_dec(&dev->pin_count); 3600 atomic_dec(&dev->pin_count);
@@ -3645,15 +3698,14 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3645 struct drm_gem_object *obj; 3698 struct drm_gem_object *obj;
3646 struct drm_i915_gem_object *obj_priv; 3699 struct drm_i915_gem_object *obj_priv;
3647 3700
3648 mutex_lock(&dev->struct_mutex);
3649 obj = drm_gem_object_lookup(dev, file_priv, args->handle); 3701 obj = drm_gem_object_lookup(dev, file_priv, args->handle);
3650 if (obj == NULL) { 3702 if (obj == NULL) {
3651 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", 3703 DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
3652 args->handle); 3704 args->handle);
3653 mutex_unlock(&dev->struct_mutex);
3654 return -EBADF; 3705 return -EBADF;
3655 } 3706 }
3656 3707
3708 mutex_lock(&dev->struct_mutex);
3657 /* Update the active list for the hardware's current position. 3709 /* Update the active list for the hardware's current position.
3658 * Otherwise this only updates on a delayed timer or when irqs are 3710 * Otherwise this only updates on a delayed timer or when irqs are
3659 * actually unmasked, and our working set ends up being larger than 3711 * actually unmasked, and our working set ends up being larger than
@@ -3792,9 +3844,8 @@ i915_gem_idle(struct drm_device *dev)
3792 3844
3793 /* Flush the GPU along with all non-CPU write domains 3845 /* Flush the GPU along with all non-CPU write domains
3794 */ 3846 */
3795 i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), 3847 i915_gem_flush(dev, I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
3796 ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); 3848 seqno = i915_add_request(dev, NULL, I915_GEM_GPU_DOMAINS);
3797 seqno = i915_add_request(dev, ~I915_GEM_DOMAIN_CPU);
3798 3849
3799 if (seqno == 0) { 3850 if (seqno == 0) {
3800 mutex_unlock(&dev->struct_mutex); 3851 mutex_unlock(&dev->struct_mutex);
@@ -4344,3 +4395,17 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
4344 drm_agp_chipset_flush(dev); 4395 drm_agp_chipset_flush(dev);
4345 return 0; 4396 return 0;
4346} 4397}
4398
4399void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv)
4400{
4401 struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
4402
4403 /* Clean up our request list when the client is going away, so that
4404 * later retire_requests won't dereference our soon-to-be-gone
4405 * file_priv.
4406 */
4407 mutex_lock(&dev->struct_mutex);
4408 while (!list_empty(&i915_file_priv->mm.request_list))
4409 list_del_init(i915_file_priv->mm.request_list.next);
4410 mutex_unlock(&dev->struct_mutex);
4411}
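The reworked i915_gem_ring_throttle() above drops the per-file last_gem_throttle_seqno and instead walks the client's new per-file request list, waiting on anything emitted more than 20 ms ago. A minimal userspace model of that loop is sketched below; struct fake_request and fake_wait_request() are stand-ins for the driver's request tracking and i915_wait_request(), and the timestamps are arbitrary example values.

#include <stdio.h>

struct fake_request {
	unsigned int seqno;
	long emitted_ms;		/* stand-in for emitted_jiffies */
};

static int fake_wait_request(unsigned int seqno)
{
	printf("waiting for request %u to retire\n", seqno);
	return 0;			/* pretend the wait succeeded */
}

int main(void)
{
	struct fake_request reqs[] = {
		{ 101, 1000 }, { 102, 1015 }, { 103, 1030 },
	};
	long now_ms = 1040;
	long recent_enough = now_ms - 20;	/* the 20 ms window from the patch */
	unsigned int i;

	for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		if (reqs[i].emitted_ms >= recent_enough)
			break;			/* recent enough: stop throttling */
		if (fake_wait_request(reqs[i].seqno) != 0)
			break;
	}
	printf("throttled on %u request(s)\n", i);	/* 2 with these values */
	return 0;
}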
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 52a059354e83..9a05cadaa4ad 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -25,6 +25,8 @@
25 * 25 *
26 */ 26 */
27 27
28#include <linux/acpi.h>
29#include <linux/pnp.h>
28#include "linux/string.h" 30#include "linux/string.h"
29#include "linux/bitops.h" 31#include "linux/bitops.h"
30#include "drmP.h" 32#include "drmP.h"
@@ -81,6 +83,143 @@
81 * to match what the GPU expects. 83 * to match what the GPU expects.
82 */ 84 */
83 85
86#define MCHBAR_I915 0x44
87#define MCHBAR_I965 0x48
88#define MCHBAR_SIZE (4*4096)
89
90#define DEVEN_REG 0x54
91#define DEVEN_MCHBAR_EN (1 << 28)
92
93/* Allocate space for the MCH regs if needed, return nonzero on error */
94static int
95intel_alloc_mchbar_resource(struct drm_device *dev)
96{
97 struct pci_dev *bridge_dev;
98 drm_i915_private_t *dev_priv = dev->dev_private;
99 int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
100 u32 temp_lo, temp_hi = 0;
101 u64 mchbar_addr;
102 int ret = 0;
103
104 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
105 if (!bridge_dev) {
106 DRM_DEBUG("no bridge dev?!\n");
107 ret = -ENODEV;
108 goto out;
109 }
110
111 if (IS_I965G(dev))
112 pci_read_config_dword(bridge_dev, reg + 4, &temp_hi);
113 pci_read_config_dword(bridge_dev, reg, &temp_lo);
114 mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
115
116 /* If ACPI doesn't have it, assume we need to allocate it ourselves */
117 if (mchbar_addr &&
118 pnp_range_reserved(mchbar_addr, mchbar_addr + MCHBAR_SIZE)) {
119 ret = 0;
120 goto out_put;
121 }
122
123 /* Get some space for it */
124 ret = pci_bus_alloc_resource(bridge_dev->bus, &dev_priv->mch_res,
125 MCHBAR_SIZE, MCHBAR_SIZE,
126 PCIBIOS_MIN_MEM,
127 0, pcibios_align_resource,
128 bridge_dev);
129 if (ret) {
130 DRM_DEBUG("failed bus alloc: %d\n", ret);
131 dev_priv->mch_res.start = 0;
132 goto out_put;
133 }
134
135 if (IS_I965G(dev))
136 pci_write_config_dword(bridge_dev, reg + 4,
137 upper_32_bits(dev_priv->mch_res.start));
138
139 pci_write_config_dword(bridge_dev, reg,
140 lower_32_bits(dev_priv->mch_res.start));
141out_put:
142 pci_dev_put(bridge_dev);
143out:
144 return ret;
145}
146
147/* Setup MCHBAR if possible, return true if we should disable it again */
148static bool
149intel_setup_mchbar(struct drm_device *dev)
150{
151 struct pci_dev *bridge_dev;
152 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
153 u32 temp;
154 bool need_disable = false, enabled;
155
156 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
157 if (!bridge_dev) {
158 DRM_DEBUG("no bridge dev?!\n");
159 goto out;
160 }
161
162 if (IS_I915G(dev) || IS_I915GM(dev)) {
163 pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
164 enabled = !!(temp & DEVEN_MCHBAR_EN);
165 } else {
166 pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
167 enabled = temp & 1;
168 }
169
170 /* If it's already enabled, don't have to do anything */
171 if (enabled)
172 goto out_put;
173
174 if (intel_alloc_mchbar_resource(dev))
175 goto out_put;
176
177 need_disable = true;
178
179 /* Space is allocated or reserved, so enable it. */
180 if (IS_I915G(dev) || IS_I915GM(dev)) {
181 pci_write_config_dword(bridge_dev, DEVEN_REG,
182 temp | DEVEN_MCHBAR_EN);
183 } else {
184 pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
185 pci_write_config_dword(bridge_dev, mchbar_reg, temp | 1);
186 }
187out_put:
188 pci_dev_put(bridge_dev);
189out:
190 return need_disable;
191}
192
193static void
194intel_teardown_mchbar(struct drm_device *dev, bool disable)
195{
196 drm_i915_private_t *dev_priv = dev->dev_private;
197 struct pci_dev *bridge_dev;
198 int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
199 u32 temp;
200
201 bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
202 if (!bridge_dev) {
203 DRM_DEBUG("no bridge dev?!\n");
204 return;
205 }
206
207 if (disable) {
208 if (IS_I915G(dev) || IS_I915GM(dev)) {
209 pci_read_config_dword(bridge_dev, DEVEN_REG, &temp);
210 temp &= ~DEVEN_MCHBAR_EN;
211 pci_write_config_dword(bridge_dev, DEVEN_REG, temp);
212 } else {
213 pci_read_config_dword(bridge_dev, mchbar_reg, &temp);
214 temp &= ~1;
215 pci_write_config_dword(bridge_dev, mchbar_reg, temp);
216 }
217 }
218
219 if (dev_priv->mch_res.start)
220 release_resource(&dev_priv->mch_res);
221}
222
84/** 223/**
85 * Detects bit 6 swizzling of address lookup between IGD access and CPU 224 * Detects bit 6 swizzling of address lookup between IGD access and CPU
86 * access through main memory. 225 * access through main memory.
@@ -91,6 +230,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
91 drm_i915_private_t *dev_priv = dev->dev_private; 230 drm_i915_private_t *dev_priv = dev->dev_private;
92 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 231 uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
93 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 232 uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
233 bool need_disable;
94 234
95 if (!IS_I9XX(dev)) { 235 if (!IS_I9XX(dev)) {
96 /* As far as we know, the 865 doesn't have these bit 6 236 /* As far as we know, the 865 doesn't have these bit 6
@@ -101,6 +241,9 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
101 } else if (IS_MOBILE(dev)) { 241 } else if (IS_MOBILE(dev)) {
102 uint32_t dcc; 242 uint32_t dcc;
103 243
244 /* Try to make sure MCHBAR is enabled before poking at it */
245 need_disable = intel_setup_mchbar(dev);
246
104 /* On mobile 9xx chipsets, channel interleave by the CPU is 247 /* On mobile 9xx chipsets, channel interleave by the CPU is
105 * determined by DCC. For single-channel, neither the CPU 248 * determined by DCC. For single-channel, neither the CPU
106 * nor the GPU do swizzling. For dual channel interleaved, 249 * nor the GPU do swizzling. For dual channel interleaved,
@@ -140,6 +283,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
140 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; 283 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
141 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; 284 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
142 } 285 }
286
287 intel_teardown_mchbar(dev, need_disable);
143 } else { 288 } else {
144 /* The 965, G33, and newer, have a very flexible memory 289 /* The 965, G33, and newer, have a very flexible memory
145 * configuration. It will enable dual-channel mode 290 * configuration. It will enable dual-channel mode
@@ -170,6 +315,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
170 } 315 }
171 } 316 }
172 317
318 /* FIXME: check with memory config on IGDNG */
319 if (IS_IGDNG(dev)) {
320 DRM_ERROR("disable tiling on IGDNG...\n");
321 swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
322 swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
323 }
324
173 dev_priv->mm.bit_6_swizzle_x = swizzle_x; 325 dev_priv->mm.bit_6_swizzle_x = swizzle_x;
174 dev_priv->mm.bit_6_swizzle_y = swizzle_y; 326 dev_priv->mm.bit_6_swizzle_y = swizzle_y;
175} 327}
@@ -213,7 +365,8 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
213 if (tiling_mode == I915_TILING_NONE) 365 if (tiling_mode == I915_TILING_NONE)
214 return true; 366 return true;
215 367
216 if (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)) 368 if (!IS_I9XX(dev) ||
369 (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
217 tile_width = 128; 370 tile_width = 128;
218 else 371 else
219 tile_width = 512; 372 tile_width = 512;
@@ -225,11 +378,18 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
225 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL) 378 if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
226 return false; 379 return false;
227 } else if (IS_I9XX(dev)) { 380 } else if (IS_I9XX(dev)) {
228 if (stride / tile_width > I830_FENCE_MAX_PITCH_VAL || 381 uint32_t pitch_val = ffs(stride / tile_width) - 1;
382
383 /* XXX: For Y tiling, FENCE_MAX_PITCH_VAL is actually 6 (8KB)
384 * instead of 4 (2KB) on 945s.
385 */
386 if (pitch_val > I915_FENCE_MAX_PITCH_VAL ||
229 size > (I830_FENCE_MAX_SIZE_VAL << 20)) 387 size > (I830_FENCE_MAX_SIZE_VAL << 20))
230 return false; 388 return false;
231 } else { 389 } else {
232 if (stride / 128 > I830_FENCE_MAX_PITCH_VAL || 390 uint32_t pitch_val = ffs(stride / tile_width) - 1;
391
392 if (pitch_val > I830_FENCE_MAX_PITCH_VAL ||
233 size > (I830_FENCE_MAX_SIZE_VAL << 19)) 393 size > (I830_FENCE_MAX_SIZE_VAL << 19))
234 return false; 394 return false;
235 } 395 }
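The fence changes above move the pitch field to a log2 encoding, pitch_val = ffs(stride / tile_width) - 1, checked against the new I830/I915_FENCE_MAX_PITCH_VAL limits. The sketch below simply evaluates that expression for a few example strides with an assumed 128-byte tile width; the stride values are illustrative, and a stride that is not a power-of-two multiple of the tile width would not round-trip through this encoding.

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned int tile_width = 128;		/* bytes; assumed Y-tiling case */
	unsigned int strides[] = { 128, 512, 2048, 8192 };
	unsigned int i;

	for (i = 0; i < sizeof(strides) / sizeof(strides[0]); i++) {
		int pitch_val = ffs(strides[i] / tile_width) - 1;

		printf("stride %5u -> pitch_val %d (i.e. stride == %u << %d)\n",
		       strides[i], pitch_val, tile_width, pitch_val);
	}
	return 0;
}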
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 98bb4c878c4e..b86b7b7130c6 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -58,6 +58,47 @@
58 DRM_I915_VBLANK_PIPE_B) 58 DRM_I915_VBLANK_PIPE_B)
59 59
60void 60void
61igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
62{
63 if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
64 dev_priv->gt_irq_mask_reg &= ~mask;
65 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
66 (void) I915_READ(GTIMR);
67 }
68}
69
70static inline void
71igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
72{
73 if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
74 dev_priv->gt_irq_mask_reg |= mask;
75 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
76 (void) I915_READ(GTIMR);
77 }
78}
79
80/* For display hotplug interrupt */
81void
82igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
83{
84 if ((dev_priv->irq_mask_reg & mask) != 0) {
85 dev_priv->irq_mask_reg &= ~mask;
86 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
87 (void) I915_READ(DEIMR);
88 }
89}
90
91static inline void
92igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
93{
94 if ((dev_priv->irq_mask_reg & mask) != mask) {
95 dev_priv->irq_mask_reg |= mask;
96 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
97 (void) I915_READ(DEIMR);
98 }
99}
100
101void
61i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask) 102i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
62{ 103{
63 if ((dev_priv->irq_mask_reg & mask) != 0) { 104 if ((dev_priv->irq_mask_reg & mask) != 0) {
@@ -196,6 +237,47 @@ static void i915_hotplug_work_func(struct work_struct *work)
196 drm_sysfs_hotplug_event(dev); 237 drm_sysfs_hotplug_event(dev);
197} 238}
198 239
240irqreturn_t igdng_irq_handler(struct drm_device *dev)
241{
242 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
243 int ret = IRQ_NONE;
244 u32 de_iir, gt_iir;
245 u32 new_de_iir, new_gt_iir;
246 struct drm_i915_master_private *master_priv;
247
248 de_iir = I915_READ(DEIIR);
249 gt_iir = I915_READ(GTIIR);
250
251 for (;;) {
252 if (de_iir == 0 && gt_iir == 0)
253 break;
254
255 ret = IRQ_HANDLED;
256
257 I915_WRITE(DEIIR, de_iir);
258 new_de_iir = I915_READ(DEIIR);
259 I915_WRITE(GTIIR, gt_iir);
260 new_gt_iir = I915_READ(GTIIR);
261
262 if (dev->primary->master) {
263 master_priv = dev->primary->master->driver_priv;
264 if (master_priv->sarea_priv)
265 master_priv->sarea_priv->last_dispatch =
266 READ_BREADCRUMB(dev_priv);
267 }
268
269 if (gt_iir & GT_USER_INTERRUPT) {
270 dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
271 DRM_WAKEUP(&dev_priv->irq_queue);
272 }
273
274 de_iir = new_de_iir;
275 gt_iir = new_gt_iir;
276 }
277
278 return ret;
279}
280
199irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) 281irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
200{ 282{
201 struct drm_device *dev = (struct drm_device *) arg; 283 struct drm_device *dev = (struct drm_device *) arg;
@@ -212,6 +294,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
212 294
213 atomic_inc(&dev_priv->irq_received); 295 atomic_inc(&dev_priv->irq_received);
214 296
297 if (IS_IGDNG(dev))
298 return igdng_irq_handler(dev);
299
215 iir = I915_READ(IIR); 300 iir = I915_READ(IIR);
216 301
217 if (IS_I965G(dev)) { 302 if (IS_I965G(dev)) {
@@ -349,8 +434,12 @@ void i915_user_irq_get(struct drm_device *dev)
349 unsigned long irqflags; 434 unsigned long irqflags;
350 435
351 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 436 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
352 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) 437 if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
353 i915_enable_irq(dev_priv, I915_USER_INTERRUPT); 438 if (IS_IGDNG(dev))
439 igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
440 else
441 i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
442 }
354 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 443 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
355} 444}
356 445
@@ -361,8 +450,12 @@ void i915_user_irq_put(struct drm_device *dev)
361 450
362 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 451 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
363 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0); 452 BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
364 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) 453 if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
365 i915_disable_irq(dev_priv, I915_USER_INTERRUPT); 454 if (IS_IGDNG(dev))
455 igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
456 else
457 i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
458 }
366 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags); 459 spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
367} 460}
368 461
@@ -455,6 +548,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
455 if (!(pipeconf & PIPEACONF_ENABLE)) 548 if (!(pipeconf & PIPEACONF_ENABLE))
456 return -EINVAL; 549 return -EINVAL;
457 550
551 if (IS_IGDNG(dev))
552 return 0;
553
458 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 554 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
459 if (IS_I965G(dev)) 555 if (IS_I965G(dev))
460 i915_enable_pipestat(dev_priv, pipe, 556 i915_enable_pipestat(dev_priv, pipe,
@@ -474,6 +570,9 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
474 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 570 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
475 unsigned long irqflags; 571 unsigned long irqflags;
476 572
573 if (IS_IGDNG(dev))
574 return;
575
477 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags); 576 spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
478 i915_disable_pipestat(dev_priv, pipe, 577 i915_disable_pipestat(dev_priv, pipe,
479 PIPE_VBLANK_INTERRUPT_ENABLE | 578 PIPE_VBLANK_INTERRUPT_ENABLE |
@@ -484,7 +583,9 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
484void i915_enable_interrupt (struct drm_device *dev) 583void i915_enable_interrupt (struct drm_device *dev)
485{ 584{
486 struct drm_i915_private *dev_priv = dev->dev_private; 585 struct drm_i915_private *dev_priv = dev->dev_private;
487 opregion_enable_asle(dev); 586
587 if (!IS_IGDNG(dev))
588 opregion_enable_asle(dev);
488 dev_priv->irq_enabled = 1; 589 dev_priv->irq_enabled = 1;
489} 590}
490 591
@@ -545,12 +646,65 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
545 646
546/* drm_dma.h hooks 647/* drm_dma.h hooks
547*/ 648*/
649static void igdng_irq_preinstall(struct drm_device *dev)
650{
651 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
652
653 I915_WRITE(HWSTAM, 0xeffe);
654
655 /* XXX hotplug from PCH */
656
657 I915_WRITE(DEIMR, 0xffffffff);
658 I915_WRITE(DEIER, 0x0);
659 (void) I915_READ(DEIER);
660
661 /* and GT */
662 I915_WRITE(GTIMR, 0xffffffff);
663 I915_WRITE(GTIER, 0x0);
664 (void) I915_READ(GTIER);
665}
666
667static int igdng_irq_postinstall(struct drm_device *dev)
668{
669 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
670 /* enable the kinds of interrupts that are always enabled */
671 u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
672 u32 render_mask = GT_USER_INTERRUPT;
673
674 dev_priv->irq_mask_reg = ~display_mask;
675 dev_priv->de_irq_enable_reg = display_mask;
676
677 /* should always be able to generate an irq */
678 I915_WRITE(DEIIR, I915_READ(DEIIR));
679 I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
680 I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
681 (void) I915_READ(DEIER);
682
683 /* user interrupt should be enabled, but masked initially */
684 dev_priv->gt_irq_mask_reg = 0xffffffff;
685 dev_priv->gt_irq_enable_reg = render_mask;
686
687 I915_WRITE(GTIIR, I915_READ(GTIIR));
688 I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
689 I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
690 (void) I915_READ(GTIER);
691
692 return 0;
693}
694
548void i915_driver_irq_preinstall(struct drm_device * dev) 695void i915_driver_irq_preinstall(struct drm_device * dev)
549{ 696{
550 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 697 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
551 698
552 atomic_set(&dev_priv->irq_received, 0); 699 atomic_set(&dev_priv->irq_received, 0);
553 700
701 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
702
703 if (IS_IGDNG(dev)) {
704 igdng_irq_preinstall(dev);
705 return;
706 }
707
554 if (I915_HAS_HOTPLUG(dev)) { 708 if (I915_HAS_HOTPLUG(dev)) {
555 I915_WRITE(PORT_HOTPLUG_EN, 0); 709 I915_WRITE(PORT_HOTPLUG_EN, 0);
556 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 710 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
@@ -562,7 +716,6 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
562 I915_WRITE(IMR, 0xffffffff); 716 I915_WRITE(IMR, 0xffffffff);
563 I915_WRITE(IER, 0x0); 717 I915_WRITE(IER, 0x0);
564 (void) I915_READ(IER); 718 (void) I915_READ(IER);
565 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
566} 719}
567 720
568int i915_driver_irq_postinstall(struct drm_device *dev) 721int i915_driver_irq_postinstall(struct drm_device *dev)
@@ -570,9 +723,12 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
570 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 723 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
571 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR; 724 u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
572 725
726 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
727
573 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B; 728 dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
574 729
575 dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */ 730 if (IS_IGDNG(dev))
731 return igdng_irq_postinstall(dev);
576 732
577 /* Unmask the interrupts that we always want on. */ 733 /* Unmask the interrupts that we always want on. */
578 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX; 734 dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
@@ -613,11 +769,24 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
613 (void) I915_READ(IER); 769 (void) I915_READ(IER);
614 770
615 opregion_enable_asle(dev); 771 opregion_enable_asle(dev);
616 DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
617 772
618 return 0; 773 return 0;
619} 774}
620 775
776static void igdng_irq_uninstall(struct drm_device *dev)
777{
778 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
779 I915_WRITE(HWSTAM, 0xffffffff);
780
781 I915_WRITE(DEIMR, 0xffffffff);
782 I915_WRITE(DEIER, 0x0);
783 I915_WRITE(DEIIR, I915_READ(DEIIR));
784
785 I915_WRITE(GTIMR, 0xffffffff);
786 I915_WRITE(GTIER, 0x0);
787 I915_WRITE(GTIIR, I915_READ(GTIIR));
788}
789
621void i915_driver_irq_uninstall(struct drm_device * dev) 790void i915_driver_irq_uninstall(struct drm_device * dev)
622{ 791{
623 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 792 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -627,6 +796,11 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
627 796
628 dev_priv->vblank_pipe = 0; 797 dev_priv->vblank_pipe = 0;
629 798
799 if (IS_IGDNG(dev)) {
800 igdng_irq_uninstall(dev);
801 return;
802 }
803
630 if (I915_HAS_HOTPLUG(dev)) { 804 if (I915_HAS_HOTPLUG(dev)) {
631 I915_WRITE(PORT_HOTPLUG_EN, 0); 805 I915_WRITE(PORT_HOTPLUG_EN, 0);
632 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT)); 806 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
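The new igdng_*_irq helpers above follow the same pattern as the existing i915_enable_irq()/i915_disable_irq(): cache the mask register (GTIMR/DEIMR) in dev_priv and only write the hardware register when the cached value actually changes. Below is a small userspace model of that pattern with a fake register and a write counter; the bit used for the user interrupt is an arbitrary placeholder, not GT_USER_INTERRUPT's real encoding.

#include <stdio.h>
#include <stdint.h>

static uint32_t fake_gtimr = 0xffffffff;	/* all sources masked */
static unsigned int reg_writes;

static void write_gtimr(uint32_t val)
{
	fake_gtimr = val;
	reg_writes++;			/* stands in for I915_WRITE + posting read */
}

static void enable_irq(uint32_t *cached_mask, uint32_t mask)
{
	if ((*cached_mask & mask) != 0) {	/* only touch HW if still masked */
		*cached_mask &= ~mask;
		write_gtimr(*cached_mask);
	}
}

static void disable_irq(uint32_t *cached_mask, uint32_t mask)
{
	if ((*cached_mask & mask) != mask) {	/* only touch HW if unmasked */
		*cached_mask |= mask;
		write_gtimr(*cached_mask);
	}
}

int main(void)
{
	uint32_t cached = 0xffffffff;
	uint32_t user_irq = 1u << 0;	/* placeholder bit */

	enable_irq(&cached, user_irq);	/* unmask: one register write */
	enable_irq(&cached, user_irq);	/* already unmasked: no write */
	disable_irq(&cached, user_irq);	/* mask again: one write */

	printf("mask register now 0x%08x after %u write(s)\n",
	       (unsigned int)fake_gtimr, reg_writes);	/* 0xffffffff, 2 */
	return 0;
}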
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 15da44cf21b1..f6237a0b1133 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -190,7 +190,8 @@
190#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8) 190#define I830_FENCE_SIZE_BITS(size) ((ffs((size) >> 19) - 1) << 8)
191#define I830_FENCE_PITCH_SHIFT 4 191#define I830_FENCE_PITCH_SHIFT 4
192#define I830_FENCE_REG_VALID (1<<0) 192#define I830_FENCE_REG_VALID (1<<0)
193#define I830_FENCE_MAX_PITCH_VAL 0x10 193#define I915_FENCE_MAX_PITCH_VAL 0x10
194#define I830_FENCE_MAX_PITCH_VAL 6
194#define I830_FENCE_MAX_SIZE_VAL (1<<8) 195#define I830_FENCE_MAX_SIZE_VAL (1<<8)
195 196
196#define I915_FENCE_START_MASK 0x0ff00000 197#define I915_FENCE_START_MASK 0x0ff00000
@@ -449,6 +450,13 @@
449#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13) 450#define PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
450#define PLL_REF_INPUT_MASK (3 << 13) 451#define PLL_REF_INPUT_MASK (3 << 13)
451#define PLL_LOAD_PULSE_PHASE_SHIFT 9 452#define PLL_LOAD_PULSE_PHASE_SHIFT 9
453/* IGDNG */
454# define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT 9
455# define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK (7 << 9)
456# define PLL_REF_SDVO_HDMI_MULTIPLIER(x) (((x)-1) << 9)
457# define DPLL_FPA1_P1_POST_DIV_SHIFT 0
458# define DPLL_FPA1_P1_POST_DIV_MASK 0xff
459
452/* 460/*
453 * Parallel to Serial Load Pulse phase selection. 461 * Parallel to Serial Load Pulse phase selection.
454 * Selects the phase for the 10X DPLL clock for the PCIe 462 * Selects the phase for the 10X DPLL clock for the PCIe
@@ -630,8 +638,11 @@
630/* Hotplug control (945+ only) */ 638/* Hotplug control (945+ only) */
631#define PORT_HOTPLUG_EN 0x61110 639#define PORT_HOTPLUG_EN 0x61110
632#define HDMIB_HOTPLUG_INT_EN (1 << 29) 640#define HDMIB_HOTPLUG_INT_EN (1 << 29)
641#define DPB_HOTPLUG_INT_EN (1 << 29)
633#define HDMIC_HOTPLUG_INT_EN (1 << 28) 642#define HDMIC_HOTPLUG_INT_EN (1 << 28)
643#define DPC_HOTPLUG_INT_EN (1 << 28)
634#define HDMID_HOTPLUG_INT_EN (1 << 27) 644#define HDMID_HOTPLUG_INT_EN (1 << 27)
645#define DPD_HOTPLUG_INT_EN (1 << 27)
635#define SDVOB_HOTPLUG_INT_EN (1 << 26) 646#define SDVOB_HOTPLUG_INT_EN (1 << 26)
636#define SDVOC_HOTPLUG_INT_EN (1 << 25) 647#define SDVOC_HOTPLUG_INT_EN (1 << 25)
637#define TV_HOTPLUG_INT_EN (1 << 18) 648#define TV_HOTPLUG_INT_EN (1 << 18)
@@ -664,8 +675,11 @@
664 675
665#define PORT_HOTPLUG_STAT 0x61114 676#define PORT_HOTPLUG_STAT 0x61114
666#define HDMIB_HOTPLUG_INT_STATUS (1 << 29) 677#define HDMIB_HOTPLUG_INT_STATUS (1 << 29)
678#define DPB_HOTPLUG_INT_STATUS (1 << 29)
667#define HDMIC_HOTPLUG_INT_STATUS (1 << 28) 679#define HDMIC_HOTPLUG_INT_STATUS (1 << 28)
680#define DPC_HOTPLUG_INT_STATUS (1 << 28)
668#define HDMID_HOTPLUG_INT_STATUS (1 << 27) 681#define HDMID_HOTPLUG_INT_STATUS (1 << 27)
682#define DPD_HOTPLUG_INT_STATUS (1 << 27)
669#define CRT_HOTPLUG_INT_STATUS (1 << 11) 683#define CRT_HOTPLUG_INT_STATUS (1 << 11)
670#define TV_HOTPLUG_INT_STATUS (1 << 10) 684#define TV_HOTPLUG_INT_STATUS (1 << 10)
671#define CRT_HOTPLUG_MONITOR_MASK (3 << 8) 685#define CRT_HOTPLUG_MONITOR_MASK (3 << 8)
@@ -950,15 +964,15 @@
950# define DAC_A_1_3_V (0 << 4) 964# define DAC_A_1_3_V (0 << 4)
951# define DAC_A_1_1_V (1 << 4) 965# define DAC_A_1_1_V (1 << 4)
952# define DAC_A_0_7_V (2 << 4) 966# define DAC_A_0_7_V (2 << 4)
953# define DAC_A_OFF (3 << 4) 967# define DAC_A_MASK (3 << 4)
954# define DAC_B_1_3_V (0 << 2) 968# define DAC_B_1_3_V (0 << 2)
955# define DAC_B_1_1_V (1 << 2) 969# define DAC_B_1_1_V (1 << 2)
956# define DAC_B_0_7_V (2 << 2) 970# define DAC_B_0_7_V (2 << 2)
957# define DAC_B_OFF (3 << 2) 971# define DAC_B_MASK (3 << 2)
958# define DAC_C_1_3_V (0 << 0) 972# define DAC_C_1_3_V (0 << 0)
959# define DAC_C_1_1_V (1 << 0) 973# define DAC_C_1_1_V (1 << 0)
960# define DAC_C_0_7_V (2 << 0) 974# define DAC_C_0_7_V (2 << 0)
961# define DAC_C_OFF (3 << 0) 975# define DAC_C_MASK (3 << 0)
962 976
963/** 977/**
964 * CSC coefficients are stored in a floating point format with 9 bits of 978 * CSC coefficients are stored in a floating point format with 9 bits of
@@ -1327,6 +1341,163 @@
1327#define TV_V_CHROMA_0 0x68400 1341#define TV_V_CHROMA_0 0x68400
1328#define TV_V_CHROMA_42 0x684a8 1342#define TV_V_CHROMA_42 0x684a8
1329 1343
1344/* Display Port */
1345#define DP_B 0x64100
1346#define DP_C 0x64200
1347#define DP_D 0x64300
1348
1349#define DP_PORT_EN (1 << 31)
1350#define DP_PIPEB_SELECT (1 << 30)
1351
1352/* Link training mode - select a suitable mode for each stage */
1353#define DP_LINK_TRAIN_PAT_1 (0 << 28)
1354#define DP_LINK_TRAIN_PAT_2 (1 << 28)
1355#define DP_LINK_TRAIN_PAT_IDLE (2 << 28)
1356#define DP_LINK_TRAIN_OFF (3 << 28)
1357#define DP_LINK_TRAIN_MASK (3 << 28)
1358#define DP_LINK_TRAIN_SHIFT 28
1359
1360/* Signal voltages. These are mostly controlled by the other end */
1361#define DP_VOLTAGE_0_4 (0 << 25)
1362#define DP_VOLTAGE_0_6 (1 << 25)
1363#define DP_VOLTAGE_0_8 (2 << 25)
1364#define DP_VOLTAGE_1_2 (3 << 25)
1365#define DP_VOLTAGE_MASK (7 << 25)
1366#define DP_VOLTAGE_SHIFT 25
1367
1368/* Signal pre-emphasis levels, like voltages, the other end tells us what
1369 * they want
1370 */
1371#define DP_PRE_EMPHASIS_0 (0 << 22)
1372#define DP_PRE_EMPHASIS_3_5 (1 << 22)
1373#define DP_PRE_EMPHASIS_6 (2 << 22)
1374#define DP_PRE_EMPHASIS_9_5 (3 << 22)
1375#define DP_PRE_EMPHASIS_MASK (7 << 22)
1376#define DP_PRE_EMPHASIS_SHIFT 22
1377
1378/* How many wires to use. I guess 3 was too hard */
1379#define DP_PORT_WIDTH_1 (0 << 19)
1380#define DP_PORT_WIDTH_2 (1 << 19)
1381#define DP_PORT_WIDTH_4 (3 << 19)
1382#define DP_PORT_WIDTH_MASK (7 << 19)
1383
1384/* Mystic DPCD version 1.1 special mode */
1385#define DP_ENHANCED_FRAMING (1 << 18)
1386
1387/** locked once port is enabled */
1388#define DP_PORT_REVERSAL (1 << 15)
1389
1390/** sends the clock on lane 15 of the PEG for debug */
1391#define DP_CLOCK_OUTPUT_ENABLE (1 << 13)
1392
1393#define DP_SCRAMBLING_DISABLE (1 << 12)
1394
1395/** limit RGB values to avoid confusing TVs */
1396#define DP_COLOR_RANGE_16_235 (1 << 8)
1397
1398/** Turn on the audio link */
1399#define DP_AUDIO_OUTPUT_ENABLE (1 << 6)
1400
1401/** vs and hs sync polarity */
1402#define DP_SYNC_VS_HIGH (1 << 4)
1403#define DP_SYNC_HS_HIGH (1 << 3)
1404
1405/** A fantasy */
1406#define DP_DETECTED (1 << 2)
1407
1408/** The aux channel provides a way to talk to the
1409 * signal sink for DDC etc. Max packet size supported
1410 * is 20 bytes in each direction, hence the 5 fixed
1411 * data registers
1412 */
1413#define DPB_AUX_CH_CTL 0x64110
1414#define DPB_AUX_CH_DATA1 0x64114
1415#define DPB_AUX_CH_DATA2 0x64118
1416#define DPB_AUX_CH_DATA3 0x6411c
1417#define DPB_AUX_CH_DATA4 0x64120
1418#define DPB_AUX_CH_DATA5 0x64124
1419
1420#define DPC_AUX_CH_CTL 0x64210
1421#define DPC_AUX_CH_DATA1 0x64214
1422#define DPC_AUX_CH_DATA2 0x64218
1423#define DPC_AUX_CH_DATA3 0x6421c
1424#define DPC_AUX_CH_DATA4 0x64220
1425#define DPC_AUX_CH_DATA5 0x64224
1426
1427#define DPD_AUX_CH_CTL 0x64310
1428#define DPD_AUX_CH_DATA1 0x64314
1429#define DPD_AUX_CH_DATA2 0x64318
1430#define DPD_AUX_CH_DATA3 0x6431c
1431#define DPD_AUX_CH_DATA4 0x64320
1432#define DPD_AUX_CH_DATA5 0x64324
1433
1434#define DP_AUX_CH_CTL_SEND_BUSY (1 << 31)
1435#define DP_AUX_CH_CTL_DONE (1 << 30)
1436#define DP_AUX_CH_CTL_INTERRUPT (1 << 29)
1437#define DP_AUX_CH_CTL_TIME_OUT_ERROR (1 << 28)
1438#define DP_AUX_CH_CTL_TIME_OUT_400us (0 << 26)
1439#define DP_AUX_CH_CTL_TIME_OUT_600us (1 << 26)
1440#define DP_AUX_CH_CTL_TIME_OUT_800us (2 << 26)
1441#define DP_AUX_CH_CTL_TIME_OUT_1600us (3 << 26)
1442#define DP_AUX_CH_CTL_TIME_OUT_MASK (3 << 26)
1443#define DP_AUX_CH_CTL_RECEIVE_ERROR (1 << 25)
1444#define DP_AUX_CH_CTL_MESSAGE_SIZE_MASK (0x1f << 20)
1445#define DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT 20
1446#define DP_AUX_CH_CTL_PRECHARGE_2US_MASK (0xf << 16)
1447#define DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT 16
1448#define DP_AUX_CH_CTL_AUX_AKSV_SELECT (1 << 15)
1449#define DP_AUX_CH_CTL_MANCHESTER_TEST (1 << 14)
1450#define DP_AUX_CH_CTL_SYNC_TEST (1 << 13)
1451#define DP_AUX_CH_CTL_DEGLITCH_TEST (1 << 12)
1452#define DP_AUX_CH_CTL_PRECHARGE_TEST (1 << 11)
1453#define DP_AUX_CH_CTL_BIT_CLOCK_2X_MASK (0x7ff)
1454#define DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT 0
1455
1456/*
1457 * Computing GMCH M and N values for the Display Port link
1458 *
1459 * GMCH M/N = dot clock * bytes per pixel / ls_clk * # of lanes
1460 *
1461 * ls_clk (we assume) is the DP link clock (1.62 or 2.7 GHz)
1462 *
1463 * The GMCH value is used internally
1464 *
1465 * bytes_per_pixel is the number of bytes coming out of the plane,
1466 * which is after the LUTs, so we want the bytes for our color format.
1467 * For our current usage, this is always 3, one byte for R, G and B.
1468 */
1469#define PIPEA_GMCH_DATA_M 0x70050
1470#define PIPEB_GMCH_DATA_M 0x71050
1471
1472/* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */
1473#define PIPE_GMCH_DATA_M_TU_SIZE_MASK (0x3f << 25)
1474#define PIPE_GMCH_DATA_M_TU_SIZE_SHIFT 25
1475
1476#define PIPE_GMCH_DATA_M_MASK (0xffffff)
1477
1478#define PIPEA_GMCH_DATA_N 0x70054
1479#define PIPEB_GMCH_DATA_N 0x71054
1480#define PIPE_GMCH_DATA_N_MASK (0xffffff)
1481
1482/*
1483 * Computing Link M and N values for the Display Port link
1484 *
1485 * Link M / N = pixel_clock / ls_clk
1486 *
1487 * (the DP spec calls pixel_clock the 'strm_clk')
1488 *
1489 * The Link value is transmitted in the Main Stream
1490 * Attributes and VB-ID.
1491 */
1492
1493#define PIPEA_DP_LINK_M 0x70060
1494#define PIPEB_DP_LINK_M 0x71060
1495#define PIPEA_DP_LINK_M_MASK (0xffffff)
1496
1497#define PIPEA_DP_LINK_N 0x70064
1498#define PIPEB_DP_LINK_N 0x71064
1499#define PIPEA_DP_LINK_N_MASK (0xffffff)
1500
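To make the GMCH and link M/N comments above concrete, the sketch below evaluates both ratios for one assumed mode (148.5 MHz dot clock, 3 bytes per pixel, a 2.7 GHz link, 4 lanes) and reduces the fractions. The numbers are illustrative only, and how the driver scales M/N into the 24-bit register fields is not shown here.

#include <stdio.h>
#include <stdint.h>

static uint64_t gcd64(uint64_t a, uint64_t b)
{
	while (b) {
		uint64_t t = a % b;
		a = b;
		b = t;
	}
	return a;
}

int main(void)
{
	uint64_t dot_clock = 148500;	/* kHz, assumed ~1080p pixel clock */
	uint64_t bytes_per_pixel = 3;	/* "always 3" per the comment above */
	uint64_t ls_clk = 2700000;	/* kHz, the 2.7 GHz DP link clock */
	uint64_t lanes = 4;		/* assumed lane count */

	uint64_t data_m = dot_clock * bytes_per_pixel;
	uint64_t data_n = ls_clk * lanes;
	uint64_t g = gcd64(data_m, data_n);
	printf("GMCH data M/N = %llu/%llu\n",	/* 33/800 with these values */
	       (unsigned long long)(data_m / g), (unsigned long long)(data_n / g));

	uint64_t link_m = dot_clock;
	uint64_t link_n = ls_clk;
	g = gcd64(link_m, link_n);
	printf("link M/N      = %llu/%llu\n",	/* 11/200 with these values */
	       (unsigned long long)(link_m / g), (unsigned long long)(link_n / g));
	return 0;
}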
1330/* Display & cursor control */ 1501/* Display & cursor control */
1331 1502
1332/* Pipe A */ 1503/* Pipe A */
@@ -1410,9 +1581,25 @@
1410 1581
1411/* Cursor A & B regs */ 1582/* Cursor A & B regs */
1412#define CURACNTR 0x70080 1583#define CURACNTR 0x70080
1584/* Old style CUR*CNTR flags (desktop 8xx) */
1585#define CURSOR_ENABLE 0x80000000
1586#define CURSOR_GAMMA_ENABLE 0x40000000
1587#define CURSOR_STRIDE_MASK 0x30000000
1588#define CURSOR_FORMAT_SHIFT 24
1589#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT)
1590#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT)
1591#define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT)
1592#define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT)
1593#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT)
1594#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT)
1595/* New style CUR*CNTR flags */
1596#define CURSOR_MODE 0x27
1413#define CURSOR_MODE_DISABLE 0x00 1597#define CURSOR_MODE_DISABLE 0x00
1414#define CURSOR_MODE_64_32B_AX 0x07 1598#define CURSOR_MODE_64_32B_AX 0x07
1415#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX) 1599#define CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
1600#define MCURSOR_PIPE_SELECT (1 << 28)
1601#define MCURSOR_PIPE_A 0x00
1602#define MCURSOR_PIPE_B (1 << 28)
1416#define MCURSOR_GAMMA_ENABLE (1 << 26) 1603#define MCURSOR_GAMMA_ENABLE (1 << 26)
1417#define CURABASE 0x70084 1604#define CURABASE 0x70084
1418#define CURAPOS 0x70088 1605#define CURAPOS 0x70088
@@ -1420,6 +1607,7 @@
1420#define CURSOR_POS_SIGN 0x8000 1607#define CURSOR_POS_SIGN 0x8000
1421#define CURSOR_X_SHIFT 0 1608#define CURSOR_X_SHIFT 0
1422#define CURSOR_Y_SHIFT 16 1609#define CURSOR_Y_SHIFT 16
1610#define CURSIZE 0x700a0
1423#define CURBCNTR 0x700c0 1611#define CURBCNTR 0x700c0
1424#define CURBBASE 0x700c4 1612#define CURBBASE 0x700c4
1425#define CURBPOS 0x700c8 1613#define CURBPOS 0x700c8
@@ -1499,4 +1687,444 @@
1499# define VGA_2X_MODE (1 << 30) 1687# define VGA_2X_MODE (1 << 30)
1500# define VGA_PIPE_B_SELECT (1 << 29) 1688# define VGA_PIPE_B_SELECT (1 << 29)
1501 1689
1690/* IGDNG */
1691
1692#define CPU_VGACNTRL 0x41000
1693
1694#define DIGITAL_PORT_HOTPLUG_CNTRL 0x44030
1695#define DIGITAL_PORTA_HOTPLUG_ENABLE (1 << 4)
1696#define DIGITAL_PORTA_SHORT_PULSE_2MS (0 << 2)
1697#define DIGITAL_PORTA_SHORT_PULSE_4_5MS (1 << 2)
1698#define DIGITAL_PORTA_SHORT_PULSE_6MS (2 << 2)
1699#define DIGITAL_PORTA_SHORT_PULSE_100MS (3 << 2)
1700#define DIGITAL_PORTA_NO_DETECT (0 << 0)
1701#define DIGITAL_PORTA_LONG_PULSE_DETECT_MASK (1 << 1)
1702#define DIGITAL_PORTA_SHORT_PULSE_DETECT_MASK (1 << 0)
1703
1704/* refresh rate hardware control */
1705#define RR_HW_CTL 0x45300
1706#define RR_HW_LOW_POWER_FRAMES_MASK 0xff
1707#define RR_HW_HIGH_POWER_FRAMES_MASK 0xff00
1708
1709#define FDI_PLL_BIOS_0 0x46000
1710#define FDI_PLL_BIOS_1 0x46004
1711#define FDI_PLL_BIOS_2 0x46008
1712#define DISPLAY_PORT_PLL_BIOS_0 0x4600c
1713#define DISPLAY_PORT_PLL_BIOS_1 0x46010
1714#define DISPLAY_PORT_PLL_BIOS_2 0x46014
1715
1716#define FDI_PLL_FREQ_CTL 0x46030
1717#define FDI_PLL_FREQ_CHANGE_REQUEST (1<<24)
1718#define FDI_PLL_FREQ_LOCK_LIMIT_MASK 0xfff00
1719#define FDI_PLL_FREQ_DISABLE_COUNT_LIMIT_MASK 0xff
1720
1721
1722#define PIPEA_DATA_M1 0x60030
1723#define TU_SIZE(x) (((x)-1) << 25) /* default size 64 */
1724#define TU_SIZE_MASK 0x7e000000
1725#define PIPEA_DATA_M1_OFFSET 0
1726#define PIPEA_DATA_N1 0x60034
1727#define PIPEA_DATA_N1_OFFSET 0
1728
1729#define PIPEA_DATA_M2 0x60038
1730#define PIPEA_DATA_M2_OFFSET 0
1731#define PIPEA_DATA_N2 0x6003c
1732#define PIPEA_DATA_N2_OFFSET 0
1733
1734#define PIPEA_LINK_M1 0x60040
1735#define PIPEA_LINK_M1_OFFSET 0
1736#define PIPEA_LINK_N1 0x60044
1737#define PIPEA_LINK_N1_OFFSET 0
1738
1739#define PIPEA_LINK_M2 0x60048
1740#define PIPEA_LINK_M2_OFFSET 0
1741#define PIPEA_LINK_N2 0x6004c
1742#define PIPEA_LINK_N2_OFFSET 0
1743
1744/* PIPEB timing regs use the same layout, starting from 0x61000 */
1745
1746#define PIPEB_DATA_M1 0x61030
1747#define PIPEB_DATA_M1_OFFSET 0
1748#define PIPEB_DATA_N1 0x61034
1749#define PIPEB_DATA_N1_OFFSET 0
1750
1751#define PIPEB_DATA_M2 0x61038
1752#define PIPEB_DATA_M2_OFFSET 0
1753#define PIPEB_DATA_N2 0x6103c
1754#define PIPEB_DATA_N2_OFFSET 0
1755
1756#define PIPEB_LINK_M1 0x61040
1757#define PIPEB_LINK_M1_OFFSET 0
1758#define PIPEB_LINK_N1 0x61044
1759#define PIPEB_LINK_N1_OFFSET 0
1760
1761#define PIPEB_LINK_M2 0x61048
1762#define PIPEB_LINK_M2_OFFSET 0
1763#define PIPEB_LINK_N2 0x6104c
1764#define PIPEB_LINK_N2_OFFSET 0
1765
1766/* CPU panel fitter */
1767#define PFA_CTL_1 0x68080
1768#define PFB_CTL_1 0x68880
1769#define PF_ENABLE (1<<31)
1770
1771/* legacy palette */
1772#define LGC_PALETTE_A 0x4a000
1773#define LGC_PALETTE_B 0x4a800
1774
1775/* interrupts */
1776#define DE_MASTER_IRQ_CONTROL (1 << 31)
1777#define DE_SPRITEB_FLIP_DONE (1 << 29)
1778#define DE_SPRITEA_FLIP_DONE (1 << 28)
1779#define DE_PLANEB_FLIP_DONE (1 << 27)
1780#define DE_PLANEA_FLIP_DONE (1 << 26)
1781#define DE_PCU_EVENT (1 << 25)
1782#define DE_GTT_FAULT (1 << 24)
1783#define DE_POISON (1 << 23)
1784#define DE_PERFORM_COUNTER (1 << 22)
1785#define DE_PCH_EVENT (1 << 21)
1786#define DE_AUX_CHANNEL_A (1 << 20)
1787#define DE_DP_A_HOTPLUG (1 << 19)
1788#define DE_GSE (1 << 18)
1789#define DE_PIPEB_VBLANK (1 << 15)
1790#define DE_PIPEB_EVEN_FIELD (1 << 14)
1791#define DE_PIPEB_ODD_FIELD (1 << 13)
1792#define DE_PIPEB_LINE_COMPARE (1 << 12)
1793#define DE_PIPEB_VSYNC (1 << 11)
1794#define DE_PIPEB_FIFO_UNDERRUN (1 << 8)
1795#define DE_PIPEA_VBLANK (1 << 7)
1796#define DE_PIPEA_EVEN_FIELD (1 << 6)
1797#define DE_PIPEA_ODD_FIELD (1 << 5)
1798#define DE_PIPEA_LINE_COMPARE (1 << 4)
1799#define DE_PIPEA_VSYNC (1 << 3)
1800#define DE_PIPEA_FIFO_UNDERRUN (1 << 0)
1801
1802#define DEISR 0x44000
1803#define DEIMR 0x44004
1804#define DEIIR 0x44008
1805#define DEIER 0x4400c
1806
1807/* GT interrupt */
1808#define GT_SYNC_STATUS (1 << 2)
1809#define GT_USER_INTERRUPT (1 << 0)
1810
1811#define GTISR 0x44010
1812#define GTIMR 0x44014
1813#define GTIIR 0x44018
1814#define GTIER 0x4401c
1815
1816/* PCH */
1817
1818/* south display engine interrupt */
1819#define SDE_CRT_HOTPLUG (1 << 11)
1820#define SDE_PORTD_HOTPLUG (1 << 10)
1821#define SDE_PORTC_HOTPLUG (1 << 9)
1822#define SDE_PORTB_HOTPLUG (1 << 8)
1823#define SDE_SDVOB_HOTPLUG (1 << 6)
1824
1825#define SDEISR 0xc4000
1826#define SDEIMR 0xc4004
1827#define SDEIIR 0xc4008
1828#define SDEIER 0xc400c
1829
1830/* digital port hotplug */
1831#define PCH_PORT_HOTPLUG 0xc4030
1832#define PORTD_HOTPLUG_ENABLE (1 << 20)
1833#define PORTD_PULSE_DURATION_2ms (0)
1834#define PORTD_PULSE_DURATION_4_5ms (1 << 18)
1835#define PORTD_PULSE_DURATION_6ms (2 << 18)
1836#define PORTD_PULSE_DURATION_100ms (3 << 18)
1837#define PORTD_HOTPLUG_NO_DETECT (0)
1838#define PORTD_HOTPLUG_SHORT_DETECT (1 << 16)
1839#define PORTD_HOTPLUG_LONG_DETECT (1 << 17)
1840#define PORTC_HOTPLUG_ENABLE (1 << 12)
1841#define PORTC_PULSE_DURATION_2ms (0)
1842#define PORTC_PULSE_DURATION_4_5ms (1 << 10)
1843#define PORTC_PULSE_DURATION_6ms (2 << 10)
1844#define PORTC_PULSE_DURATION_100ms (3 << 10)
1845#define PORTC_HOTPLUG_NO_DETECT (0)
1846#define PORTC_HOTPLUG_SHORT_DETECT (1 << 8)
1847#define PORTC_HOTPLUG_LONG_DETECT (1 << 9)
1848#define PORTB_HOTPLUG_ENABLE (1 << 4)
1849#define PORTB_PULSE_DURATION_2ms (0)
1850#define PORTB_PULSE_DURATION_4_5ms (1 << 2)
1851#define PORTB_PULSE_DURATION_6ms (2 << 2)
1852#define PORTB_PULSE_DURATION_100ms (3 << 2)
1853#define PORTB_HOTPLUG_NO_DETECT (0)
1854#define PORTB_HOTPLUG_SHORT_DETECT (1 << 0)
1855#define PORTB_HOTPLUG_LONG_DETECT (1 << 1)
1856
1857#define PCH_GPIOA 0xc5010
1858#define PCH_GPIOB 0xc5014
1859#define PCH_GPIOC 0xc5018
1860#define PCH_GPIOD 0xc501c
1861#define PCH_GPIOE 0xc5020
1862#define PCH_GPIOF 0xc5024
1863
1864#define PCH_DPLL_A 0xc6014
1865#define PCH_DPLL_B 0xc6018
1866
1867#define PCH_FPA0 0xc6040
1868#define PCH_FPA1 0xc6044
1869#define PCH_FPB0 0xc6048
1870#define PCH_FPB1 0xc604c
1871
1872#define PCH_DPLL_TEST 0xc606c
1873
1874#define PCH_DREF_CONTROL 0xC6200
1875#define DREF_CONTROL_MASK 0x7fc3
1876#define DREF_CPU_SOURCE_OUTPUT_DISABLE (0<<13)
1877#define DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD (2<<13)
1878#define DREF_CPU_SOURCE_OUTPUT_NONSPREAD (3<<13)
1879#define DREF_CPU_SOURCE_OUTPUT_MASK (3<<13)
1880#define DREF_SSC_SOURCE_DISABLE (0<<11)
1881#define DREF_SSC_SOURCE_ENABLE (2<<11)
1882#define DREF_SSC_SOURCE_MASK (2<<11)
1883#define DREF_NONSPREAD_SOURCE_DISABLE (0<<9)
1884#define DREF_NONSPREAD_CK505_ENABLE (1<<9)
1885#define DREF_NONSPREAD_SOURCE_ENABLE (2<<9)
1886#define DREF_NONSPREAD_SOURCE_MASK (2<<9)
1887#define DREF_SUPERSPREAD_SOURCE_DISABLE (0<<7)
1888#define DREF_SUPERSPREAD_SOURCE_ENABLE (2<<7)
1889#define DREF_SSC4_DOWNSPREAD (0<<6)
1890#define DREF_SSC4_CENTERSPREAD (1<<6)
1891#define DREF_SSC1_DISABLE (0<<1)
1892#define DREF_SSC1_ENABLE (1<<1)
1893#define DREF_SSC4_DISABLE (0)
1894#define DREF_SSC4_ENABLE (1)
1895
1896#define PCH_RAWCLK_FREQ 0xc6204
1897#define FDL_TP1_TIMER_SHIFT 12
1898#define FDL_TP1_TIMER_MASK (3<<12)
1899#define FDL_TP2_TIMER_SHIFT 10
1900#define FDL_TP2_TIMER_MASK (3<<10)
1901#define RAWCLK_FREQ_MASK 0x3ff
1902
1903#define PCH_DPLL_TMR_CFG 0xc6208
1904
1905#define PCH_SSC4_PARMS 0xc6210
1906#define PCH_SSC4_AUX_PARMS 0xc6214
1907
1908/* transcoder */
1909
1910#define TRANS_HTOTAL_A 0xe0000
1911#define TRANS_HTOTAL_SHIFT 16
1912#define TRANS_HACTIVE_SHIFT 0
1913#define TRANS_HBLANK_A 0xe0004
1914#define TRANS_HBLANK_END_SHIFT 16
1915#define TRANS_HBLANK_START_SHIFT 0
1916#define TRANS_HSYNC_A 0xe0008
1917#define TRANS_HSYNC_END_SHIFT 16
1918#define TRANS_HSYNC_START_SHIFT 0
1919#define TRANS_VTOTAL_A 0xe000c
1920#define TRANS_VTOTAL_SHIFT 16
1921#define TRANS_VACTIVE_SHIFT 0
1922#define TRANS_VBLANK_A 0xe0010
1923#define TRANS_VBLANK_END_SHIFT 16
1924#define TRANS_VBLANK_START_SHIFT 0
1925#define TRANS_VSYNC_A 0xe0014
1926#define TRANS_VSYNC_END_SHIFT 16
1927#define TRANS_VSYNC_START_SHIFT 0
1928
1929#define TRANSA_DATA_M1 0xe0030
1930#define TRANSA_DATA_N1 0xe0034
1931#define TRANSA_DATA_M2 0xe0038
1932#define TRANSA_DATA_N2 0xe003c
1933#define TRANSA_DP_LINK_M1 0xe0040
1934#define TRANSA_DP_LINK_N1 0xe0044
1935#define TRANSA_DP_LINK_M2 0xe0048
1936#define TRANSA_DP_LINK_N2 0xe004c
1937
1938#define TRANS_HTOTAL_B 0xe1000
1939#define TRANS_HBLANK_B 0xe1004
1940#define TRANS_HSYNC_B 0xe1008
1941#define TRANS_VTOTAL_B 0xe100c
1942#define TRANS_VBLANK_B 0xe1010
1943#define TRANS_VSYNC_B 0xe1014
1944
1945#define TRANSB_DATA_M1 0xe1030
1946#define TRANSB_DATA_N1 0xe1034
1947#define TRANSB_DATA_M2 0xe1038
1948#define TRANSB_DATA_N2 0xe103c
1949#define TRANSB_DP_LINK_M1 0xe1040
1950#define TRANSB_DP_LINK_N1 0xe1044
1951#define TRANSB_DP_LINK_M2 0xe1048
1952#define TRANSB_DP_LINK_N2 0xe104c
1953
1954#define TRANSACONF 0xf0008
1955#define TRANSBCONF 0xf1008
1956#define TRANS_DISABLE (0<<31)
1957#define TRANS_ENABLE (1<<31)
1958#define TRANS_STATE_MASK (1<<30)
1959#define TRANS_STATE_DISABLE (0<<30)
1960#define TRANS_STATE_ENABLE (1<<30)
1961#define TRANS_FSYNC_DELAY_HB1 (0<<27)
1962#define TRANS_FSYNC_DELAY_HB2 (1<<27)
1963#define TRANS_FSYNC_DELAY_HB3 (2<<27)
1964#define TRANS_FSYNC_DELAY_HB4 (3<<27)
1965#define TRANS_DP_AUDIO_ONLY (1<<26)
1966#define TRANS_DP_VIDEO_AUDIO (0<<26)
1967#define TRANS_PROGRESSIVE (0<<21)
1968#define TRANS_8BPC (0<<5)
1969#define TRANS_10BPC (1<<5)
1970#define TRANS_6BPC (2<<5)
1971#define TRANS_12BPC (3<<5)
1972
1973#define FDI_RXA_CHICKEN 0xc200c
1974#define FDI_RXB_CHICKEN 0xc2010
1975#define FDI_RX_PHASE_SYNC_POINTER_ENABLE (1)
1976
1977/* CPU: FDI_TX */
1978#define FDI_TXA_CTL 0x60100
1979#define FDI_TXB_CTL 0x61100
1980#define FDI_TX_DISABLE (0<<31)
1981#define FDI_TX_ENABLE (1<<31)
1982#define FDI_LINK_TRAIN_PATTERN_1 (0<<28)
1983#define FDI_LINK_TRAIN_PATTERN_2 (1<<28)
1984#define FDI_LINK_TRAIN_PATTERN_IDLE (2<<28)
1985#define FDI_LINK_TRAIN_NONE (3<<28)
1986#define FDI_LINK_TRAIN_VOLTAGE_0_4V (0<<25)
1987#define FDI_LINK_TRAIN_VOLTAGE_0_6V (1<<25)
1988#define FDI_LINK_TRAIN_VOLTAGE_0_8V (2<<25)
1989#define FDI_LINK_TRAIN_VOLTAGE_1_2V (3<<25)
1990#define FDI_LINK_TRAIN_PRE_EMPHASIS_NONE (0<<22)
1991#define FDI_LINK_TRAIN_PRE_EMPHASIS_1_5X (1<<22)
1992#define FDI_LINK_TRAIN_PRE_EMPHASIS_2X (2<<22)
1993#define FDI_LINK_TRAIN_PRE_EMPHASIS_3X (3<<22)
1994#define FDI_DP_PORT_WIDTH_X1 (0<<19)
1995#define FDI_DP_PORT_WIDTH_X2 (1<<19)
1996#define FDI_DP_PORT_WIDTH_X3 (2<<19)
1997#define FDI_DP_PORT_WIDTH_X4 (3<<19)
1998#define FDI_TX_ENHANCE_FRAME_ENABLE (1<<18)
1999/* IGDNG: hardwired to 1 */
2000#define FDI_TX_PLL_ENABLE (1<<14)
2001/* both Tx and Rx */
2002#define FDI_SCRAMBLING_ENABLE (0<<7)
2003#define FDI_SCRAMBLING_DISABLE (1<<7)
2004
2005/* FDI_RX, FDI_X is hard-wired to Transcoder_X */
2006#define FDI_RXA_CTL 0xf000c
2007#define FDI_RXB_CTL 0xf100c
2008#define FDI_RX_ENABLE (1<<31)
2009#define FDI_RX_DISABLE (0<<31)
2010/* train, dp width same as FDI_TX */
2011#define FDI_DP_PORT_WIDTH_X8 (7<<19)
2012#define FDI_8BPC (0<<16)
2013#define FDI_10BPC (1<<16)
2014#define FDI_6BPC (2<<16)
2015#define FDI_12BPC (3<<16)
2016#define FDI_LINK_REVERSE_OVERWRITE (1<<15)
2017#define FDI_DMI_LINK_REVERSE_MASK (1<<14)
2018#define FDI_RX_PLL_ENABLE (1<<13)
2019#define FDI_FS_ERR_CORRECT_ENABLE (1<<11)
2020#define FDI_FE_ERR_CORRECT_ENABLE (1<<10)
2021#define FDI_FS_ERR_REPORT_ENABLE (1<<9)
2022#define FDI_FE_ERR_REPORT_ENABLE (1<<8)
2023#define FDI_RX_ENHANCE_FRAME_ENABLE (1<<6)
2024#define FDI_SEL_RAWCLK (0<<4)
2025#define FDI_SEL_PCDCLK (1<<4)
2026
2027#define FDI_RXA_MISC 0xf0010
2028#define FDI_RXB_MISC 0xf1010
2029#define FDI_RXA_TUSIZE1 0xf0030
2030#define FDI_RXA_TUSIZE2 0xf0038
2031#define FDI_RXB_TUSIZE1 0xf1030
2032#define FDI_RXB_TUSIZE2 0xf1038
2033
2034/* FDI_RX interrupt register format */
2035#define FDI_RX_INTER_LANE_ALIGN (1<<10)
2036#define FDI_RX_SYMBOL_LOCK (1<<9) /* train 2 */
2037#define FDI_RX_BIT_LOCK (1<<8) /* train 1 */
2038#define FDI_RX_TRAIN_PATTERN_2_FAIL (1<<7)
2039#define FDI_RX_FS_CODE_ERR (1<<6)
2040#define FDI_RX_FE_CODE_ERR (1<<5)
2041#define FDI_RX_SYMBOL_ERR_RATE_ABOVE (1<<4)
2042#define FDI_RX_HDCP_LINK_FAIL (1<<3)
2043#define FDI_RX_PIXEL_FIFO_OVERFLOW (1<<2)
2044#define FDI_RX_CROSS_CLOCK_OVERFLOW (1<<1)
2045#define FDI_RX_SYMBOL_QUEUE_OVERFLOW (1<<0)
2046
2047#define FDI_RXA_IIR 0xf0014
2048#define FDI_RXA_IMR 0xf0018
2049#define FDI_RXB_IIR 0xf1014
2050#define FDI_RXB_IMR 0xf1018
2051
2052#define FDI_PLL_CTL_1 0xfe000
2053#define FDI_PLL_CTL_2 0xfe004
2054
2055/* CRT */
2056#define PCH_ADPA 0xe1100
2057#define ADPA_TRANS_SELECT_MASK (1<<30)
2058#define ADPA_TRANS_A_SELECT 0
2059#define ADPA_TRANS_B_SELECT (1<<30)
2060#define ADPA_CRT_HOTPLUG_MASK 0x03ff0000 /* bit 25-16 */
2061#define ADPA_CRT_HOTPLUG_MONITOR_NONE (0<<24)
2062#define ADPA_CRT_HOTPLUG_MONITOR_MASK (3<<24)
2063#define ADPA_CRT_HOTPLUG_MONITOR_COLOR (3<<24)
2064#define ADPA_CRT_HOTPLUG_MONITOR_MONO (2<<24)
2065#define ADPA_CRT_HOTPLUG_ENABLE (1<<23)
2066#define ADPA_CRT_HOTPLUG_PERIOD_64 (0<<22)
2067#define ADPA_CRT_HOTPLUG_PERIOD_128 (1<<22)
2068#define ADPA_CRT_HOTPLUG_WARMUP_5MS (0<<21)
2069#define ADPA_CRT_HOTPLUG_WARMUP_10MS (1<<21)
2070#define ADPA_CRT_HOTPLUG_SAMPLE_2S (0<<20)
2071#define ADPA_CRT_HOTPLUG_SAMPLE_4S (1<<20)
2072#define ADPA_CRT_HOTPLUG_VOLTAGE_40 (0<<18)
2073#define ADPA_CRT_HOTPLUG_VOLTAGE_50 (1<<18)
2074#define ADPA_CRT_HOTPLUG_VOLTAGE_60 (2<<18)
2075#define ADPA_CRT_HOTPLUG_VOLTAGE_70 (3<<18)
2076#define ADPA_CRT_HOTPLUG_VOLREF_325MV (0<<17)
2077#define ADPA_CRT_HOTPLUG_VOLREF_475MV (1<<17)
2078#define ADPA_CRT_HOTPLUG_FORCE_TRIGGER (1<<16)
2079
2080/* or SDVOB */
2081#define HDMIB 0xe1140
2082#define PORT_ENABLE (1 << 31)
2083#define TRANSCODER_A (0)
2084#define TRANSCODER_B (1 << 30)
2085#define COLOR_FORMAT_8bpc (0)
2086#define COLOR_FORMAT_12bpc (3 << 26)
2087#define SDVOB_HOTPLUG_ENABLE (1 << 23)
2088#define SDVO_ENCODING (0)
2089#define TMDS_ENCODING (2 << 10)
2090#define NULL_PACKET_VSYNC_ENABLE (1 << 9)
2091#define SDVOB_BORDER_ENABLE (1 << 7)
2092#define AUDIO_ENABLE (1 << 6)
2093#define VSYNC_ACTIVE_HIGH (1 << 4)
2094#define HSYNC_ACTIVE_HIGH (1 << 3)
2095#define PORT_DETECTED (1 << 2)
2096
2097#define HDMIC 0xe1150
2098#define HDMID 0xe1160
2099
2100#define PCH_LVDS 0xe1180
2101#define LVDS_DETECTED (1 << 1)
2102
2103#define BLC_PWM_CPU_CTL2 0x48250
2104#define PWM_ENABLE (1 << 31)
2105#define PWM_PIPE_A (0 << 29)
2106#define PWM_PIPE_B (1 << 29)
2107#define BLC_PWM_CPU_CTL 0x48254
2108
2109#define BLC_PWM_PCH_CTL1 0xc8250
2110#define PWM_PCH_ENABLE (1 << 31)
2111#define PWM_POLARITY_ACTIVE_LOW (1 << 29)
2112#define PWM_POLARITY_ACTIVE_HIGH (0 << 29)
2113#define PWM_POLARITY_ACTIVE_LOW2 (1 << 28)
2114#define PWM_POLARITY_ACTIVE_HIGH2 (0 << 28)
2115
2116#define BLC_PWM_PCH_CTL2 0xc8254
2117
2118#define PCH_PP_STATUS 0xc7200
2119#define PCH_PP_CONTROL 0xc7204
2120#define EDP_FORCE_VDD (1 << 3)
2121#define EDP_BLC_ENABLE (1 << 2)
2122#define PANEL_POWER_RESET (1 << 1)
2123#define PANEL_POWER_OFF (0 << 0)
2124#define PANEL_POWER_ON (1 << 0)
2125#define PCH_PP_ON_DELAYS 0xc7208
2126#define EDP_PANEL (1 << 30)
2127#define PCH_PP_OFF_DELAYS 0xc720c
2128#define PCH_PP_DIVISOR 0xc7210
2129
1502#endif /* _I915_REG_H_ */ 2130#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index ce8a21344a71..a98e2831ed31 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -295,6 +295,16 @@ int i915_save_state(struct drm_device *dev)
295 i915_save_palette(dev, PIPE_B); 295 i915_save_palette(dev, PIPE_B);
296 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT); 296 dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
297 297
298 /* Cursor state */
299 dev_priv->saveCURACNTR = I915_READ(CURACNTR);
300 dev_priv->saveCURAPOS = I915_READ(CURAPOS);
301 dev_priv->saveCURABASE = I915_READ(CURABASE);
302 dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
303 dev_priv->saveCURBPOS = I915_READ(CURBPOS);
304 dev_priv->saveCURBBASE = I915_READ(CURBBASE);
305 if (!IS_I9XX(dev))
306 dev_priv->saveCURSIZE = I915_READ(CURSIZE);
307
298 /* CRT state */ 308 /* CRT state */
299 dev_priv->saveADPA = I915_READ(ADPA); 309 dev_priv->saveADPA = I915_READ(ADPA);
300 310
@@ -480,6 +490,16 @@ int i915_restore_state(struct drm_device *dev)
480 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR); 490 I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
481 I915_WRITE(DSPBADDR, I915_READ(DSPBADDR)); 491 I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
482 492
493 /* Cursor state */
494 I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
495 I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
496 I915_WRITE(CURABASE, dev_priv->saveCURABASE);
497 I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
498 I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
499 I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
500 if (!IS_I9XX(dev))
501 I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
502
483 /* CRT state */ 503 /* CRT state */
484 I915_WRITE(ADPA, dev_priv->saveADPA); 504 I915_WRITE(ADPA, dev_priv->saveADPA);
485 505
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index fc28e2bbd542..754dd22fdd77 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -30,6 +30,8 @@
30#include "i915_drv.h" 30#include "i915_drv.h"
31#include "intel_bios.h" 31#include "intel_bios.h"
32 32
33#define SLAVE_ADDR1 0x70
34#define SLAVE_ADDR2 0x72
33 35
34static void * 36static void *
35find_section(struct bdb_header *bdb, int section_id) 37find_section(struct bdb_header *bdb, int section_id)
@@ -57,9 +59,43 @@ find_section(struct bdb_header *bdb, int section_id)
57 return NULL; 59 return NULL;
58} 60}
59 61
60/* Try to find panel data */
61static void 62static void
62parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb) 63fill_detail_timing_data(struct drm_display_mode *panel_fixed_mode,
64 struct lvds_dvo_timing *dvo_timing)
65{
66 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) |
67 dvo_timing->hactive_lo;
68 panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
69 ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
70 panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
71 dvo_timing->hsync_pulse_width;
72 panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
73 ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
74
75 panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) |
76 dvo_timing->vactive_lo;
77 panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
78 dvo_timing->vsync_off;
79 panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
80 dvo_timing->vsync_pulse_width;
81 panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
82 ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
83 panel_fixed_mode->clock = dvo_timing->clock * 10;
84 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
85
86 /* Some VBTs have bogus h/vtotal values */
87 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal)
88 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
89 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
90 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
91
92 drm_mode_set_name(panel_fixed_mode);
93}
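Editor's illustration, not part of the patch: how the split VBT timing bytes recombine in the helper above, with made-up values.

	#include <assert.h>

	int main(void)
	{
		unsigned char hactive_hi = 0x05, hactive_lo = 0x00;
		unsigned int hdisplay = (hactive_hi << 8) | hactive_lo;

		assert(hdisplay == 1280);    /* (0x05 << 8) | 0x00 */
		assert(6825 * 10 == 68250);  /* the VBT clock field is in 10 kHz units, */
		return 0;                    /* hence the "* 10" scaling above */
	}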
94
95/* Try to find integrated panel data */
96static void
97parse_lfp_panel_data(struct drm_i915_private *dev_priv,
98 struct bdb_header *bdb)
63{ 99{
64 struct bdb_lvds_options *lvds_options; 100 struct bdb_lvds_options *lvds_options;
65 struct bdb_lvds_lfp_data *lvds_lfp_data; 101 struct bdb_lvds_lfp_data *lvds_lfp_data;
@@ -91,38 +127,45 @@ parse_panel_data(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
91 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode), 127 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
92 DRM_MEM_DRIVER); 128 DRM_MEM_DRIVER);
93 129
94 panel_fixed_mode->hdisplay = (dvo_timing->hactive_hi << 8) | 130 fill_detail_timing_data(panel_fixed_mode, dvo_timing);
95 dvo_timing->hactive_lo;
96 panel_fixed_mode->hsync_start = panel_fixed_mode->hdisplay +
97 ((dvo_timing->hsync_off_hi << 8) | dvo_timing->hsync_off_lo);
98 panel_fixed_mode->hsync_end = panel_fixed_mode->hsync_start +
99 dvo_timing->hsync_pulse_width;
100 panel_fixed_mode->htotal = panel_fixed_mode->hdisplay +
101 ((dvo_timing->hblank_hi << 8) | dvo_timing->hblank_lo);
102 131
103 panel_fixed_mode->vdisplay = (dvo_timing->vactive_hi << 8) | 132 dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
104 dvo_timing->vactive_lo;
105 panel_fixed_mode->vsync_start = panel_fixed_mode->vdisplay +
106 dvo_timing->vsync_off;
107 panel_fixed_mode->vsync_end = panel_fixed_mode->vsync_start +
108 dvo_timing->vsync_pulse_width;
109 panel_fixed_mode->vtotal = panel_fixed_mode->vdisplay +
110 ((dvo_timing->vblank_hi << 8) | dvo_timing->vblank_lo);
111 panel_fixed_mode->clock = dvo_timing->clock * 10;
112 panel_fixed_mode->type = DRM_MODE_TYPE_PREFERRED;
113 133
114 /* Some VBTs have bogus h/vtotal values */ 134 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
115 if (panel_fixed_mode->hsync_end > panel_fixed_mode->htotal) 135 drm_mode_debug_printmodeline(panel_fixed_mode);
116 panel_fixed_mode->htotal = panel_fixed_mode->hsync_end + 1;
117 if (panel_fixed_mode->vsync_end > panel_fixed_mode->vtotal)
118 panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end + 1;
119 136
120 drm_mode_set_name(panel_fixed_mode); 137 return;
138}
121 139
122 dev_priv->vbt_mode = panel_fixed_mode; 140/* Try to find sdvo panel data */
141static void
142parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
143 struct bdb_header *bdb)
144{
145 struct bdb_sdvo_lvds_options *sdvo_lvds_options;
146 struct lvds_dvo_timing *dvo_timing;
147 struct drm_display_mode *panel_fixed_mode;
123 148
124 DRM_DEBUG("Found panel mode in BIOS VBT tables:\n"); 149 dev_priv->sdvo_lvds_vbt_mode = NULL;
125 drm_mode_debug_printmodeline(panel_fixed_mode); 150
151 sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
152 if (!sdvo_lvds_options)
153 return;
154
155 dvo_timing = find_section(bdb, BDB_SDVO_PANEL_DTDS);
156 if (!dvo_timing)
157 return;
158
159 panel_fixed_mode = drm_calloc(1, sizeof(*panel_fixed_mode),
160 DRM_MEM_DRIVER);
161
162 if (!panel_fixed_mode)
163 return;
164
165 fill_detail_timing_data(panel_fixed_mode,
166 dvo_timing + sdvo_lvds_options->panel_type);
167
168 dev_priv->sdvo_lvds_vbt_mode = panel_fixed_mode;
126 169
127 return; 170 return;
128} 171}
@@ -152,6 +195,88 @@ parse_general_features(struct drm_i915_private *dev_priv,
152 } 195 }
153} 196}
154 197
198static void
199parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
200 struct bdb_header *bdb)
201{
202 struct sdvo_device_mapping *p_mapping;
203 struct bdb_general_definitions *p_defs;
204 struct child_device_config *p_child;
205 int i, child_device_num, count;
206 u16 block_size, *block_ptr;
207
208 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
209 if (!p_defs) {
210 DRM_DEBUG("No general definition block is found\n");
211 return;
212 }
213 /* Check whether the size of the child device meets the requirements.
214 * If the child device size obtained from the general definition block
215 * differs from sizeof(struct child_device_config), skip the
216 * parsing of the SDVO device info.
217 */
218 if (p_defs->child_dev_size != sizeof(*p_child)) {
219 /* different child dev size . Ignore it */
220 DRM_DEBUG("different child size is found. Invalid.\n");
221 return;
222 }
223 /* get the block size of general definitions */
224 block_ptr = (u16 *)((char *)p_defs - 2);
225 block_size = *block_ptr;
226 /* get the number of child device */
227 child_device_num = (block_size - sizeof(*p_defs)) /
228 sizeof(*p_child);
229 count = 0;
230 for (i = 0; i < child_device_num; i++) {
231 p_child = &(p_defs->devices[i]);
232 if (!p_child->device_type) {
233 /* skip the device block if device type is invalid */
234 continue;
235 }
236 if (p_child->slave_addr != SLAVE_ADDR1 &&
237 p_child->slave_addr != SLAVE_ADDR2) {
238 /*
239 * If the slave address is neither 0x70 nor 0x72,
240 * it is not an SDVO device. Skip it.
241 */
242 continue;
243 }
244 if (p_child->dvo_port != DEVICE_PORT_DVOB &&
245 p_child->dvo_port != DEVICE_PORT_DVOC) {
246 /* skip the incorrect SDVO port */
247 DRM_DEBUG("Incorrect SDVO port. Skip it \n");
248 continue;
249 }
250 DRM_DEBUG("the SDVO device with slave addr %2x is found on "
251 "%s port\n",
252 p_child->slave_addr,
253 (p_child->dvo_port == DEVICE_PORT_DVOB) ?
254 "SDVOB" : "SDVOC");
255 p_mapping = &(dev_priv->sdvo_mappings[p_child->dvo_port - 1]);
256 if (!p_mapping->initialized) {
257 p_mapping->dvo_port = p_child->dvo_port;
258 p_mapping->slave_addr = p_child->slave_addr;
259 p_mapping->dvo_wiring = p_child->dvo_wiring;
260 p_mapping->initialized = 1;
261 } else {
262 DRM_DEBUG("Maybe one SDVO port is shared by "
263 "two SDVO device.\n");
264 }
265 if (p_child->slave2_addr) {
266 /* Maybe this is an SDVO device with multiple inputs, */
267 /* and the mapping info is not added. */
268 DRM_DEBUG("there exists the slave2_addr. Maybe this "
269 "is a SDVO device with multiple inputs.\n");
270 }
271 count++;
272 }
273
274 if (!count) {
275 /* No SDVO device info is found */
276 DRM_DEBUG("No SDVO device info is found in VBT\n");
277 }
278 return;
279}
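Editor's sketch of the layout assumption behind the "(char *)p_defs - 2" arithmetic above; the struct below is illustrative only and is not defined by this patch.

	/* A BDB block is laid out as: 1-byte id, 2-byte size, then the payload. */
	struct bdb_block_header_example {
		unsigned char  id;
		unsigned short size;       /* size of the payload that follows */
		unsigned char  payload[];  /* find_section() returns this address */
	} __attribute__((packed));
	/* Hence the 16-bit block size sits two bytes before the p_defs pointer. */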
155/** 280/**
156 * intel_init_bios - initialize VBIOS settings & find VBT 281 * intel_init_bios - initialize VBIOS settings & find VBT
157 * @dev: DRM device 282 * @dev: DRM device
@@ -199,8 +324,9 @@ intel_init_bios(struct drm_device *dev)
199 324
200 /* Grab useful general definitions */ 325 /* Grab useful general definitions */
201 parse_general_features(dev_priv, bdb); 326 parse_general_features(dev_priv, bdb);
202 parse_panel_data(dev_priv, bdb); 327 parse_lfp_panel_data(dev_priv, bdb);
203 328 parse_sdvo_panel_data(dev_priv, bdb);
329 parse_sdvo_device_mapping(dev_priv, bdb);
204 pci_unmap_rom(pdev, bios); 330 pci_unmap_rom(pdev, bios);
205 331
206 return 0; 332 return 0;
diff --git a/drivers/gpu/drm/i915/intel_bios.h b/drivers/gpu/drm/i915/intel_bios.h
index de621aad85b5..fe72e1c225d8 100644
--- a/drivers/gpu/drm/i915/intel_bios.h
+++ b/drivers/gpu/drm/i915/intel_bios.h
@@ -135,6 +135,86 @@ struct bdb_general_features {
135 u8 rsvd11:6; /* finish byte */ 135 u8 rsvd11:6; /* finish byte */
136} __attribute__((packed)); 136} __attribute__((packed));
137 137
138/* pre-915 */
139#define GPIO_PIN_DVI_LVDS 0x03 /* "DVI/LVDS DDC GPIO pins" */
140#define GPIO_PIN_ADD_I2C 0x05 /* "ADDCARD I2C GPIO pins" */
141#define GPIO_PIN_ADD_DDC 0x04 /* "ADDCARD DDC GPIO pins" */
142#define GPIO_PIN_ADD_DDC_I2C 0x06 /* "ADDCARD DDC/I2C GPIO pins" */
143
144/* Pre 915 */
145#define DEVICE_TYPE_NONE 0x00
146#define DEVICE_TYPE_CRT 0x01
147#define DEVICE_TYPE_TV 0x09
148#define DEVICE_TYPE_EFP 0x12
149#define DEVICE_TYPE_LFP 0x22
150/* On 915+ */
151#define DEVICE_TYPE_CRT_DPMS 0x6001
152#define DEVICE_TYPE_CRT_DPMS_HOTPLUG 0x4001
153#define DEVICE_TYPE_TV_COMPOSITE 0x0209
154#define DEVICE_TYPE_TV_MACROVISION 0x0289
155#define DEVICE_TYPE_TV_RF_COMPOSITE 0x020c
156#define DEVICE_TYPE_TV_SVIDEO_COMPOSITE 0x0609
157#define DEVICE_TYPE_TV_SCART 0x0209
158#define DEVICE_TYPE_TV_CODEC_HOTPLUG_PWR 0x6009
159#define DEVICE_TYPE_EFP_HOTPLUG_PWR 0x6012
160#define DEVICE_TYPE_EFP_DVI_HOTPLUG_PWR 0x6052
161#define DEVICE_TYPE_EFP_DVI_I 0x6053
162#define DEVICE_TYPE_EFP_DVI_D_DUAL 0x6152
163#define DEVICE_TYPE_EFP_DVI_D_HDCP 0x60d2
164#define DEVICE_TYPE_OPENLDI_HOTPLUG_PWR 0x6062
165#define DEVICE_TYPE_OPENLDI_DUALPIX 0x6162
166#define DEVICE_TYPE_LFP_PANELLINK 0x5012
167#define DEVICE_TYPE_LFP_CMOS_PWR 0x5042
168#define DEVICE_TYPE_LFP_LVDS_PWR 0x5062
169#define DEVICE_TYPE_LFP_LVDS_DUAL 0x5162
170#define DEVICE_TYPE_LFP_LVDS_DUAL_HDCP 0x51e2
171
172#define DEVICE_CFG_NONE 0x00
173#define DEVICE_CFG_12BIT_DVOB 0x01
174#define DEVICE_CFG_12BIT_DVOC 0x02
175#define DEVICE_CFG_24BIT_DVOBC 0x09
176#define DEVICE_CFG_24BIT_DVOCB 0x0a
177#define DEVICE_CFG_DUAL_DVOB 0x11
178#define DEVICE_CFG_DUAL_DVOC 0x12
179#define DEVICE_CFG_DUAL_DVOBC 0x13
180#define DEVICE_CFG_DUAL_LINK_DVOBC 0x19
181#define DEVICE_CFG_DUAL_LINK_DVOCB 0x1a
182
183#define DEVICE_WIRE_NONE 0x00
184#define DEVICE_WIRE_DVOB 0x01
185#define DEVICE_WIRE_DVOC 0x02
186#define DEVICE_WIRE_DVOBC 0x03
187#define DEVICE_WIRE_DVOBB 0x05
188#define DEVICE_WIRE_DVOCC 0x06
189#define DEVICE_WIRE_DVOB_MASTER 0x0d
190#define DEVICE_WIRE_DVOC_MASTER 0x0e
191
192#define DEVICE_PORT_DVOA 0x00 /* none on 845+ */
193#define DEVICE_PORT_DVOB 0x01
194#define DEVICE_PORT_DVOC 0x02
195
196struct child_device_config {
197 u16 handle;
198 u16 device_type;
199 u8 device_id[10]; /* See DEVICE_TYPE_* above */
200 u16 addin_offset;
201 u8 dvo_port; /* See DEVICE_PORT_* above */
202 u8 i2c_pin;
203 u8 slave_addr;
204 u8 ddc_pin;
205 u16 edid_ptr;
206 u8 dvo_cfg; /* See DEVICE_CFG_* above */
207 u8 dvo2_port;
208 u8 i2c2_pin;
209 u8 slave2_addr;
210 u8 ddc2_pin;
211 u8 capabilities;
212 u8 dvo_wiring; /* See DEVICE_WIRE_* above */
213 u8 dvo2_wiring;
214 u16 extended_type;
215 u8 dvo_function;
216} __attribute__((packed));
217
138struct bdb_general_definitions { 218struct bdb_general_definitions {
139 /* DDC GPIO */ 219 /* DDC GPIO */
140 u8 crt_ddc_gmbus_pin; 220 u8 crt_ddc_gmbus_pin;
@@ -149,14 +229,19 @@ struct bdb_general_definitions {
149 u8 boot_display[2]; 229 u8 boot_display[2];
150 u8 child_dev_size; 230 u8 child_dev_size;
151 231
152 /* device info */ 232 /*
153 u8 tv_or_lvds_info[33]; 233 * Device info:
154 u8 dev1[33]; 234 * If TV is present, it'll be at devices[0].
155 u8 dev2[33]; 235 * LVDS will be next, either devices[0] or [1], if present.
156 u8 dev3[33]; 236 * On some platforms the number of devices is 6, but it could be as few as
157 u8 dev4[33]; 237 * 4 if both TV and LVDS are missing.
158 /* may be another device block here on some platforms */ 238 * The device count depends on the size of the general definition
159}; 239 * block and is obtained with the following formula:
240 * number = (block_size - sizeof(bdb_general_definitions))/
241 * sizeof(child_device_config);
242 */
243 struct child_device_config devices[0];
244} __attribute__((packed));
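A short sketch of the device-count formula from the comment above (editor's illustration; block_size stands for the 16-bit size stored just before the block payload, and the helper name is hypothetical). It assumes the two struct definitions above.

	static unsigned int example_child_device_num(unsigned short block_size)
	{
		/* Same formula the SDVO mapping parser uses in intel_bios.c. */
		return (block_size - sizeof(struct bdb_general_definitions)) /
			sizeof(struct child_device_config);
	}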
160 245
161struct bdb_lvds_options { 246struct bdb_lvds_options {
162 u8 panel_type; 247 u8 panel_type;
@@ -279,6 +364,23 @@ struct vch_bdb_22 {
279 struct vch_panel_data panels[16]; 364 struct vch_panel_data panels[16];
280} __attribute__((packed)); 365} __attribute__((packed));
281 366
367struct bdb_sdvo_lvds_options {
368 u8 panel_backlight;
369 u8 h40_set_panel_type;
370 u8 panel_type;
371 u8 ssc_clk_freq;
372 u16 als_low_trip;
373 u16 als_high_trip;
374 u8 sclalarcoeff_tab_row_num;
375 u8 sclalarcoeff_tab_row_size;
376 u8 coefficient[8];
377 u8 panel_misc_bits_1;
378 u8 panel_misc_bits_2;
379 u8 panel_misc_bits_3;
380 u8 panel_misc_bits_4;
381} __attribute__((packed));
382
383
282bool intel_init_bios(struct drm_device *dev); 384bool intel_init_bios(struct drm_device *dev);
283 385
284/* 386/*
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 19148c3df637..6de97fc66029 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -37,9 +37,14 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
37{ 37{
38 struct drm_device *dev = encoder->dev; 38 struct drm_device *dev = encoder->dev;
39 struct drm_i915_private *dev_priv = dev->dev_private; 39 struct drm_i915_private *dev_priv = dev->dev_private;
40 u32 temp; 40 u32 temp, reg;
41 41
42 temp = I915_READ(ADPA); 42 if (IS_IGDNG(dev))
43 reg = PCH_ADPA;
44 else
45 reg = ADPA;
46
47 temp = I915_READ(reg);
43 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE); 48 temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
44 temp |= ADPA_DAC_ENABLE; 49 temp |= ADPA_DAC_ENABLE;
45 50
@@ -58,7 +63,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
58 break; 63 break;
59 } 64 }
60 65
61 I915_WRITE(ADPA, temp); 66 I915_WRITE(reg, temp);
62} 67}
63 68
64static int intel_crt_mode_valid(struct drm_connector *connector, 69static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -101,17 +106,23 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
101 struct drm_i915_private *dev_priv = dev->dev_private; 106 struct drm_i915_private *dev_priv = dev->dev_private;
102 int dpll_md_reg; 107 int dpll_md_reg;
103 u32 adpa, dpll_md; 108 u32 adpa, dpll_md;
109 u32 adpa_reg;
104 110
105 if (intel_crtc->pipe == 0) 111 if (intel_crtc->pipe == 0)
106 dpll_md_reg = DPLL_A_MD; 112 dpll_md_reg = DPLL_A_MD;
107 else 113 else
108 dpll_md_reg = DPLL_B_MD; 114 dpll_md_reg = DPLL_B_MD;
109 115
116 if (IS_IGDNG(dev))
117 adpa_reg = PCH_ADPA;
118 else
119 adpa_reg = ADPA;
120
110 /* 121 /*
111 * Disable separate mode multiplier used when cloning SDVO to CRT 122 * Disable separate mode multiplier used when cloning SDVO to CRT
112 * XXX this needs to be adjusted when we really are cloning 123 * XXX this needs to be adjusted when we really are cloning
113 */ 124 */
114 if (IS_I965G(dev)) { 125 if (IS_I965G(dev) && !IS_IGDNG(dev)) {
115 dpll_md = I915_READ(dpll_md_reg); 126 dpll_md = I915_READ(dpll_md_reg);
116 I915_WRITE(dpll_md_reg, 127 I915_WRITE(dpll_md_reg,
117 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK); 128 dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -125,13 +136,53 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
125 136
126 if (intel_crtc->pipe == 0) { 137 if (intel_crtc->pipe == 0) {
127 adpa |= ADPA_PIPE_A_SELECT; 138 adpa |= ADPA_PIPE_A_SELECT;
128 I915_WRITE(BCLRPAT_A, 0); 139 if (!IS_IGDNG(dev))
140 I915_WRITE(BCLRPAT_A, 0);
129 } else { 141 } else {
130 adpa |= ADPA_PIPE_B_SELECT; 142 adpa |= ADPA_PIPE_B_SELECT;
131 I915_WRITE(BCLRPAT_B, 0); 143 if (!IS_IGDNG(dev))
144 I915_WRITE(BCLRPAT_B, 0);
132 } 145 }
133 146
134 I915_WRITE(ADPA, adpa); 147 I915_WRITE(adpa_reg, adpa);
148}
149
150static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
151{
152 struct drm_device *dev = connector->dev;
153 struct drm_i915_private *dev_priv = dev->dev_private;
154 u32 adpa, temp;
155 bool ret;
156
157 temp = adpa = I915_READ(PCH_ADPA);
158
159 adpa &= ~ADPA_CRT_HOTPLUG_MASK;
160
161 adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
162 ADPA_CRT_HOTPLUG_WARMUP_10MS |
163 ADPA_CRT_HOTPLUG_SAMPLE_4S |
164 ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
165 ADPA_CRT_HOTPLUG_VOLREF_325MV |
166 ADPA_CRT_HOTPLUG_ENABLE |
167 ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
168
169 DRM_DEBUG("pch crt adpa 0x%x", adpa);
170 I915_WRITE(PCH_ADPA, adpa);
171
172 /* This delay might not be needed, as it is not specified in the spec... */
173 udelay(1000);
174
175 /* Check the status to see if both blue and green are on now */
176 adpa = I915_READ(PCH_ADPA);
177 if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) ==
178 ADPA_CRT_HOTPLUG_MONITOR_COLOR)
179 ret = true;
180 else
181 ret = false;
182
183 /* restore original register value */
184 I915_WRITE(PCH_ADPA, temp);
185 return ret;
135} 186}
136 187
137/** 188/**
@@ -148,6 +199,10 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
148 struct drm_i915_private *dev_priv = dev->dev_private; 199 struct drm_i915_private *dev_priv = dev->dev_private;
149 u32 hotplug_en; 200 u32 hotplug_en;
150 int i, tries = 0; 201 int i, tries = 0;
202
203 if (IS_IGDNG(dev))
204 return intel_igdng_crt_detect_hotplug(connector);
205
151 /* 206 /*
152 * On 4 series desktop, CRT detect sequence needs to be done twice 207
153 * to get a reliable result. 208 * to get a reliable result.
@@ -198,9 +253,142 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
198 return intel_ddc_probe(intel_output); 253 return intel_ddc_probe(intel_output);
199} 254}
200 255
256static enum drm_connector_status
257intel_crt_load_detect(struct drm_crtc *crtc, struct intel_output *intel_output)
258{
259 struct drm_encoder *encoder = &intel_output->enc;
260 struct drm_device *dev = encoder->dev;
261 struct drm_i915_private *dev_priv = dev->dev_private;
262 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
263 uint32_t pipe = intel_crtc->pipe;
264 uint32_t save_bclrpat;
265 uint32_t save_vtotal;
266 uint32_t vtotal, vactive;
267 uint32_t vsample;
268 uint32_t vblank, vblank_start, vblank_end;
269 uint32_t dsl;
270 uint32_t bclrpat_reg;
271 uint32_t vtotal_reg;
272 uint32_t vblank_reg;
273 uint32_t vsync_reg;
274 uint32_t pipeconf_reg;
275 uint32_t pipe_dsl_reg;
276 uint8_t st00;
277 enum drm_connector_status status;
278
279 if (pipe == 0) {
280 bclrpat_reg = BCLRPAT_A;
281 vtotal_reg = VTOTAL_A;
282 vblank_reg = VBLANK_A;
283 vsync_reg = VSYNC_A;
284 pipeconf_reg = PIPEACONF;
285 pipe_dsl_reg = PIPEADSL;
286 } else {
287 bclrpat_reg = BCLRPAT_B;
288 vtotal_reg = VTOTAL_B;
289 vblank_reg = VBLANK_B;
290 vsync_reg = VSYNC_B;
291 pipeconf_reg = PIPEBCONF;
292 pipe_dsl_reg = PIPEBDSL;
293 }
294
295 save_bclrpat = I915_READ(bclrpat_reg);
296 save_vtotal = I915_READ(vtotal_reg);
297 vblank = I915_READ(vblank_reg);
298
299 vtotal = ((save_vtotal >> 16) & 0xfff) + 1;
300 vactive = (save_vtotal & 0x7ff) + 1;
301
302 vblank_start = (vblank & 0xfff) + 1;
303 vblank_end = ((vblank >> 16) & 0xfff) + 1;
304
305 /* Set the border color to purple. */
306 I915_WRITE(bclrpat_reg, 0x500050);
307
308 if (IS_I9XX(dev)) {
309 uint32_t pipeconf = I915_READ(pipeconf_reg);
310 I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
311 /* Wait for next Vblank to substitute
312 * border color for Color info */
313 intel_wait_for_vblank(dev);
314 st00 = I915_READ8(VGA_MSR_WRITE);
315 status = ((st00 & (1 << 4)) != 0) ?
316 connector_status_connected :
317 connector_status_disconnected;
318
319 I915_WRITE(pipeconf_reg, pipeconf);
320 } else {
321 bool restore_vblank = false;
322 int count, detect;
323
324 /*
325 * If there isn't any border, add some.
326 * Yes, this will flicker
327 */
328 if (vblank_start <= vactive && vblank_end >= vtotal) {
329 uint32_t vsync = I915_READ(vsync_reg);
330 uint32_t vsync_start = (vsync & 0xffff) + 1;
331
332 vblank_start = vsync_start;
333 I915_WRITE(vblank_reg,
334 (vblank_start - 1) |
335 ((vblank_end - 1) << 16));
336 restore_vblank = true;
337 }
338 /* sample in the vertical border, selecting the larger one */
339 if (vblank_start - vactive >= vtotal - vblank_end)
340 vsample = (vblank_start + vactive) >> 1;
341 else
342 vsample = (vtotal + vblank_end) >> 1;
343
344 /*
345 * Wait for the border to be displayed
346 */
347 while (I915_READ(pipe_dsl_reg) >= vactive)
348 ;
349 while ((dsl = I915_READ(pipe_dsl_reg)) <= vsample)
350 ;
351 /*
352 * Watch ST00 for an entire scanline
353 */
354 detect = 0;
355 count = 0;
356 do {
357 count++;
358 /* Read the ST00 VGA status register */
359 st00 = I915_READ8(VGA_MSR_WRITE);
360 if (st00 & (1 << 4))
361 detect++;
362 } while ((I915_READ(pipe_dsl_reg) == dsl));
363
364 /* restore vblank if necessary */
365 if (restore_vblank)
366 I915_WRITE(vblank_reg, vblank);
367 /*
368 * If more than 3/4 of the scanline detected a monitor,
369 * then it is assumed to be present. This works even on i830,
370 * where there isn't any way to force the border color across
371 * the screen
372 */
373 status = detect * 4 > count * 3 ?
374 connector_status_connected :
375 connector_status_disconnected;
376 }
377
378 /* Restore previous settings */
379 I915_WRITE(bclrpat_reg, save_bclrpat);
380
381 return status;
382}
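Editor's illustration of the 3/4-of-a-scanline threshold used above, with hypothetical sample counts; the helper name is made up for the example.

	#include <stdbool.h>

	static bool example_load_detect_threshold(int detect, int count)
	{
		/* "connected" if more than 3/4 of the sampled scanline saw the sense bit */
		return detect * 4 > count * 3;
	}
	/* e.g. detect = 120 of count = 150 samples: 480 > 450, so the CRT is reported connected. */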
383
201static enum drm_connector_status intel_crt_detect(struct drm_connector *connector) 384static enum drm_connector_status intel_crt_detect(struct drm_connector *connector)
202{ 385{
203 struct drm_device *dev = connector->dev; 386 struct drm_device *dev = connector->dev;
387 struct intel_output *intel_output = to_intel_output(connector);
388 struct drm_encoder *encoder = &intel_output->enc;
389 struct drm_crtc *crtc;
390 int dpms_mode;
391 enum drm_connector_status status;
204 392
205 if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) { 393 if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
206 if (intel_crt_detect_hotplug(connector)) 394 if (intel_crt_detect_hotplug(connector))
@@ -212,8 +400,20 @@ static enum drm_connector_status intel_crt_detect(struct drm_connector *connecto
212 if (intel_crt_detect_ddc(connector)) 400 if (intel_crt_detect_ddc(connector))
213 return connector_status_connected; 401 return connector_status_connected;
214 402
215 /* TODO use load detect */ 403 /* for pre-945g platforms use load detect */
216 return connector_status_unknown; 404 if (encoder->crtc && encoder->crtc->enabled) {
405 status = intel_crt_load_detect(encoder->crtc, intel_output);
406 } else {
407 crtc = intel_get_load_detect_pipe(intel_output,
408 NULL, &dpms_mode);
409 if (crtc) {
410 status = intel_crt_load_detect(crtc, intel_output);
411 intel_release_load_detect_pipe(intel_output, dpms_mode);
412 } else
413 status = connector_status_unknown;
414 }
415
416 return status;
217} 417}
218 418
219static void intel_crt_destroy(struct drm_connector *connector) 419static void intel_crt_destroy(struct drm_connector *connector)
@@ -236,11 +436,6 @@ static int intel_crt_set_property(struct drm_connector *connector,
236 struct drm_property *property, 436 struct drm_property *property,
237 uint64_t value) 437 uint64_t value)
238{ 438{
239 struct drm_device *dev = connector->dev;
240
241 if (property == dev->mode_config.dpms_property && connector->encoder)
242 intel_crt_dpms(connector->encoder, (uint32_t)(value & 0xf));
243
244 return 0; 439 return 0;
245} 440}
246 441
@@ -257,6 +452,7 @@ static const struct drm_encoder_helper_funcs intel_crt_helper_funcs = {
257}; 452};
258 453
259static const struct drm_connector_funcs intel_crt_connector_funcs = { 454static const struct drm_connector_funcs intel_crt_connector_funcs = {
455 .dpms = drm_helper_connector_dpms,
260 .detect = intel_crt_detect, 456 .detect = intel_crt_detect,
261 .fill_modes = drm_helper_probe_single_connector_modes, 457 .fill_modes = drm_helper_probe_single_connector_modes,
262 .destroy = intel_crt_destroy, 458 .destroy = intel_crt_destroy,
@@ -282,6 +478,7 @@ void intel_crt_init(struct drm_device *dev)
282{ 478{
283 struct drm_connector *connector; 479 struct drm_connector *connector;
284 struct intel_output *intel_output; 480 struct intel_output *intel_output;
481 u32 i2c_reg;
285 482
286 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 483 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
287 if (!intel_output) 484 if (!intel_output)
@@ -298,7 +495,11 @@ void intel_crt_init(struct drm_device *dev)
298 &intel_output->enc); 495 &intel_output->enc);
299 496
300 /* Set up the DDC bus. */ 497 /* Set up the DDC bus. */
301 intel_output->ddc_bus = intel_i2c_create(dev, GPIOA, "CRTDDC_A"); 498 if (IS_IGDNG(dev))
499 i2c_reg = PCH_GPIOA;
500 else
501 i2c_reg = GPIOA;
502 intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
302 if (!intel_output->ddc_bus) { 503 if (!intel_output->ddc_bus) {
303 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 504 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
304 "failed.\n"); 505 "failed.\n");
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3387cf32f385..028f5b66e3d8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -137,6 +137,8 @@ struct intel_limit {
137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7 137#define INTEL_LIMIT_G4X_DUAL_CHANNEL_LVDS 7
138#define INTEL_LIMIT_IGD_SDVO_DAC 8 138#define INTEL_LIMIT_IGD_SDVO_DAC 8
139#define INTEL_LIMIT_IGD_LVDS 9 139#define INTEL_LIMIT_IGD_LVDS 9
140#define INTEL_LIMIT_IGDNG_SDVO_DAC 10
141#define INTEL_LIMIT_IGDNG_LVDS 11
140 142
141/*The parameter is for SDVO on G4x platform*/ 143/*The parameter is for SDVO on G4x platform*/
142#define G4X_DOT_SDVO_MIN 25000 144#define G4X_DOT_SDVO_MIN 25000
@@ -216,12 +218,43 @@ struct intel_limit {
216#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7 218#define G4X_P2_DUAL_CHANNEL_LVDS_FAST 7
217#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0 219#define G4X_P2_DUAL_CHANNEL_LVDS_LIMIT 0
218 220
221/* IGDNG */
222/* The clock is calculated using (register_value + 2) for
223 N/M1/M2, so the range values here are (actual_value - 2).
224 */
225#define IGDNG_DOT_MIN 25000
226#define IGDNG_DOT_MAX 350000
227#define IGDNG_VCO_MIN 1760000
228#define IGDNG_VCO_MAX 3510000
229#define IGDNG_N_MIN 1
230#define IGDNG_N_MAX 5
231#define IGDNG_M_MIN 79
232#define IGDNG_M_MAX 118
233#define IGDNG_M1_MIN 12
234#define IGDNG_M1_MAX 23
235#define IGDNG_M2_MIN 5
236#define IGDNG_M2_MAX 9
237#define IGDNG_P_SDVO_DAC_MIN 5
238#define IGDNG_P_SDVO_DAC_MAX 80
239#define IGDNG_P_LVDS_MIN 28
240#define IGDNG_P_LVDS_MAX 112
241#define IGDNG_P1_MIN 1
242#define IGDNG_P1_MAX 8
243#define IGDNG_P2_SDVO_DAC_SLOW 10
244#define IGDNG_P2_SDVO_DAC_FAST 5
245#define IGDNG_P2_LVDS_SLOW 14 /* single channel */
246#define IGDNG_P2_LVDS_FAST 7 /* double channel */
247#define IGDNG_P2_DOT_LIMIT 225000 /* 225 MHz */
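Editor's note: per the comment at the top of this block, the N/M1/M2 limits above are register-encoded values; a hypothetical helper makes the (value + 2) relationship explicit.

	static int example_actual_from_encoded(int reg_value)
	{
		return reg_value + 2;  /* e.g. IGDNG_M1_MIN 12 encodes an actual M1 of 14 */
	}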
248
219static bool 249static bool
220intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 250intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
221 int target, int refclk, intel_clock_t *best_clock); 251 int target, int refclk, intel_clock_t *best_clock);
222static bool 252static bool
223intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, 253intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
224 int target, int refclk, intel_clock_t *best_clock); 254 int target, int refclk, intel_clock_t *best_clock);
255static bool
256intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
257 int target, int refclk, intel_clock_t *best_clock);
225 258
226static const intel_limit_t intel_limits[] = { 259static const intel_limit_t intel_limits[] = {
227 { /* INTEL_LIMIT_I8XX_DVO_DAC */ 260 { /* INTEL_LIMIT_I8XX_DVO_DAC */
@@ -383,9 +416,47 @@ static const intel_limit_t intel_limits[] = {
383 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW }, 416 .p2_slow = I9XX_P2_LVDS_SLOW, .p2_fast = I9XX_P2_LVDS_SLOW },
384 .find_pll = intel_find_best_PLL, 417 .find_pll = intel_find_best_PLL,
385 }, 418 },
386 419 { /* INTEL_LIMIT_IGDNG_SDVO_DAC */
420 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
421 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
422 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
423 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
424 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
425 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
426 .p = { .min = IGDNG_P_SDVO_DAC_MIN, .max = IGDNG_P_SDVO_DAC_MAX },
427 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
428 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
429 .p2_slow = IGDNG_P2_SDVO_DAC_SLOW,
430 .p2_fast = IGDNG_P2_SDVO_DAC_FAST },
431 .find_pll = intel_igdng_find_best_PLL,
432 },
433 { /* INTEL_LIMIT_IGDNG_LVDS */
434 .dot = { .min = IGDNG_DOT_MIN, .max = IGDNG_DOT_MAX },
435 .vco = { .min = IGDNG_VCO_MIN, .max = IGDNG_VCO_MAX },
436 .n = { .min = IGDNG_N_MIN, .max = IGDNG_N_MAX },
437 .m = { .min = IGDNG_M_MIN, .max = IGDNG_M_MAX },
438 .m1 = { .min = IGDNG_M1_MIN, .max = IGDNG_M1_MAX },
439 .m2 = { .min = IGDNG_M2_MIN, .max = IGDNG_M2_MAX },
440 .p = { .min = IGDNG_P_LVDS_MIN, .max = IGDNG_P_LVDS_MAX },
441 .p1 = { .min = IGDNG_P1_MIN, .max = IGDNG_P1_MAX },
442 .p2 = { .dot_limit = IGDNG_P2_DOT_LIMIT,
443 .p2_slow = IGDNG_P2_LVDS_SLOW,
444 .p2_fast = IGDNG_P2_LVDS_FAST },
445 .find_pll = intel_igdng_find_best_PLL,
446 },
387}; 447};
388 448
449static const intel_limit_t *intel_igdng_limit(struct drm_crtc *crtc)
450{
451 const intel_limit_t *limit;
452 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
453 limit = &intel_limits[INTEL_LIMIT_IGDNG_LVDS];
454 else
455 limit = &intel_limits[INTEL_LIMIT_IGDNG_SDVO_DAC];
456
457 return limit;
458}
459
389static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) 460static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
390{ 461{
391 struct drm_device *dev = crtc->dev; 462 struct drm_device *dev = crtc->dev;
@@ -418,7 +489,9 @@ static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
418 struct drm_device *dev = crtc->dev; 489 struct drm_device *dev = crtc->dev;
419 const intel_limit_t *limit; 490 const intel_limit_t *limit;
420 491
421 if (IS_G4X(dev)) { 492 if (IS_IGDNG(dev))
493 limit = intel_igdng_limit(crtc);
494 else if (IS_G4X(dev)) {
422 limit = intel_g4x_limit(crtc); 495 limit = intel_g4x_limit(crtc);
423 } else if (IS_I9XX(dev) && !IS_IGD(dev)) { 496 } else if (IS_I9XX(dev) && !IS_IGD(dev)) {
424 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) 497 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS))
@@ -630,7 +703,64 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
630 } 703 }
631 } 704 }
632 } 705 }
706 return found;
707}
708
709static bool
710intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
711 int target, int refclk, intel_clock_t *best_clock)
712{
713 struct drm_device *dev = crtc->dev;
714 struct drm_i915_private *dev_priv = dev->dev_private;
715 intel_clock_t clock;
716 int max_n;
717 bool found;
718 int err_most = 47;
719 found = false;
720
721 if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
722 if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) ==
723 LVDS_CLKB_POWER_UP)
724 clock.p2 = limit->p2.p2_fast;
725 else
726 clock.p2 = limit->p2.p2_slow;
727 } else {
728 if (target < limit->p2.dot_limit)
729 clock.p2 = limit->p2.p2_slow;
730 else
731 clock.p2 = limit->p2.p2_fast;
732 }
733
734 memset(best_clock, 0, sizeof(*best_clock));
735 max_n = limit->n.max;
736 /* based on hardware requirement, prefer smaller n over precision */
737 for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
738 /* based on hardware requirement, prefer larger m1, m2, p1 */
739 for (clock.m1 = limit->m1.max;
740 clock.m1 >= limit->m1.min; clock.m1--) {
741 for (clock.m2 = limit->m2.max;
742 clock.m2 >= limit->m2.min; clock.m2--) {
743 for (clock.p1 = limit->p1.max;
744 clock.p1 >= limit->p1.min; clock.p1--) {
745 int this_err;
633 746
747 intel_clock(dev, refclk, &clock);
748 if (!intel_PLL_is_valid(crtc, &clock))
749 continue;
750 this_err = abs((10000 - (target*10000/clock.dot)));
751 if (this_err < err_most) {
752 *best_clock = clock;
753 err_most = this_err;
754 max_n = clock.n;
755 found = true;
756 /* found on first matching */
757 goto out;
758 }
759 }
760 }
761 }
762 }
763out:
634 return found; 764 return found;
635} 765}
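Editor's illustration of the error metric used in the search above, with hypothetical clocks; 64-bit math and the function name are only there to keep the sketch self-contained and overflow-safe.

	#include <stdlib.h>

	static long long example_pll_error(long long target, long long dot)
	{
		/* error in units of 0.01% of the target dot clock */
		return llabs(10000 - (target * 10000 / dot));
	}
	/* e.g. target = 148500 kHz, dot = 148352 kHz -> error 9 (0.09%), */
	/* well inside the err_most = 47 (0.47%) tolerance used above.    */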
636 766
@@ -785,18 +915,292 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
785 return 0; 915 return 0;
786} 916}
787 917
918static void igdng_crtc_dpms(struct drm_crtc *crtc, int mode)
919{
920 struct drm_device *dev = crtc->dev;
921 struct drm_i915_private *dev_priv = dev->dev_private;
922 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
923 int pipe = intel_crtc->pipe;
924 int plane = intel_crtc->pipe;
925 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
926 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
927 int dspcntr_reg = (plane == 0) ? DSPACNTR : DSPBCNTR;
928 int dspbase_reg = (plane == 0) ? DSPAADDR : DSPBADDR;
929 int fdi_tx_reg = (pipe == 0) ? FDI_TXA_CTL : FDI_TXB_CTL;
930 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
931 int fdi_rx_iir_reg = (pipe == 0) ? FDI_RXA_IIR : FDI_RXB_IIR;
932 int fdi_rx_imr_reg = (pipe == 0) ? FDI_RXA_IMR : FDI_RXB_IMR;
933 int transconf_reg = (pipe == 0) ? TRANSACONF : TRANSBCONF;
934 int pf_ctl_reg = (pipe == 0) ? PFA_CTL_1 : PFB_CTL_1;
935 int cpu_htot_reg = (pipe == 0) ? HTOTAL_A : HTOTAL_B;
936 int cpu_hblank_reg = (pipe == 0) ? HBLANK_A : HBLANK_B;
937 int cpu_hsync_reg = (pipe == 0) ? HSYNC_A : HSYNC_B;
938 int cpu_vtot_reg = (pipe == 0) ? VTOTAL_A : VTOTAL_B;
939 int cpu_vblank_reg = (pipe == 0) ? VBLANK_A : VBLANK_B;
940 int cpu_vsync_reg = (pipe == 0) ? VSYNC_A : VSYNC_B;
941 int trans_htot_reg = (pipe == 0) ? TRANS_HTOTAL_A : TRANS_HTOTAL_B;
942 int trans_hblank_reg = (pipe == 0) ? TRANS_HBLANK_A : TRANS_HBLANK_B;
943 int trans_hsync_reg = (pipe == 0) ? TRANS_HSYNC_A : TRANS_HSYNC_B;
944 int trans_vtot_reg = (pipe == 0) ? TRANS_VTOTAL_A : TRANS_VTOTAL_B;
945 int trans_vblank_reg = (pipe == 0) ? TRANS_VBLANK_A : TRANS_VBLANK_B;
946 int trans_vsync_reg = (pipe == 0) ? TRANS_VSYNC_A : TRANS_VSYNC_B;
947 u32 temp;
948 int tries = 5, j;
949
950 /* XXX: When our outputs are all unaware of DPMS modes other than off
951 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
952 */
953 switch (mode) {
954 case DRM_MODE_DPMS_ON:
955 case DRM_MODE_DPMS_STANDBY:
956 case DRM_MODE_DPMS_SUSPEND:
957 DRM_DEBUG("crtc %d dpms on\n", pipe);
958 /* enable PCH DPLL */
959 temp = I915_READ(pch_dpll_reg);
960 if ((temp & DPLL_VCO_ENABLE) == 0) {
961 I915_WRITE(pch_dpll_reg, temp | DPLL_VCO_ENABLE);
962 I915_READ(pch_dpll_reg);
963 }
788 964
965 /* enable PCH FDI RX PLL, wait warmup plus DMI latency */
966 temp = I915_READ(fdi_rx_reg);
967 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE |
968 FDI_SEL_PCDCLK |
969 FDI_DP_PORT_WIDTH_X4); /* default 4 lanes */
970 I915_READ(fdi_rx_reg);
971 udelay(200);
972
973 /* Enable CPU FDI TX PLL, always on for IGDNG */
974 temp = I915_READ(fdi_tx_reg);
975 if ((temp & FDI_TX_PLL_ENABLE) == 0) {
976 I915_WRITE(fdi_tx_reg, temp | FDI_TX_PLL_ENABLE);
977 I915_READ(fdi_tx_reg);
978 udelay(100);
979 }
789 980
790/** 981 /* Enable CPU pipe */
791 * Sets the power management mode of the pipe and plane. 982 temp = I915_READ(pipeconf_reg);
792 * 983 if ((temp & PIPEACONF_ENABLE) == 0) {
793 * This code should probably grow support for turning the cursor off and back 984 I915_WRITE(pipeconf_reg, temp | PIPEACONF_ENABLE);
794 * on appropriately at the same time as we're turning the pipe off/on. 985 I915_READ(pipeconf_reg);
795 */ 986 udelay(100);
796static void intel_crtc_dpms(struct drm_crtc *crtc, int mode) 987 }
988
989 /* configure and enable CPU plane */
990 temp = I915_READ(dspcntr_reg);
991 if ((temp & DISPLAY_PLANE_ENABLE) == 0) {
992 I915_WRITE(dspcntr_reg, temp | DISPLAY_PLANE_ENABLE);
993 /* Flush the plane changes */
994 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
995 }
996
997 /* enable CPU FDI TX and PCH FDI RX */
998 temp = I915_READ(fdi_tx_reg);
999 temp |= FDI_TX_ENABLE;
1000 temp |= FDI_DP_PORT_WIDTH_X4; /* default */
1001 temp &= ~FDI_LINK_TRAIN_NONE;
1002 temp |= FDI_LINK_TRAIN_PATTERN_1;
1003 I915_WRITE(fdi_tx_reg, temp);
1004 I915_READ(fdi_tx_reg);
1005
1006 temp = I915_READ(fdi_rx_reg);
1007 temp &= ~FDI_LINK_TRAIN_NONE;
1008 temp |= FDI_LINK_TRAIN_PATTERN_1;
1009 I915_WRITE(fdi_rx_reg, temp | FDI_RX_ENABLE);
1010 I915_READ(fdi_rx_reg);
1011
1012 udelay(150);
1013
1014 /* Train FDI. */
1015 /* unmask FDI RX Interrupt symbol_lock and bit_lock bits
1016 for the training result */
1017 temp = I915_READ(fdi_rx_imr_reg);
1018 temp &= ~FDI_RX_SYMBOL_LOCK;
1019 temp &= ~FDI_RX_BIT_LOCK;
1020 I915_WRITE(fdi_rx_imr_reg, temp);
1021 I915_READ(fdi_rx_imr_reg);
1022 udelay(150);
1023
1024 temp = I915_READ(fdi_rx_iir_reg);
1025 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1026
1027 if ((temp & FDI_RX_BIT_LOCK) == 0) {
1028 for (j = 0; j < tries; j++) {
1029 temp = I915_READ(fdi_rx_iir_reg);
1030 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1031 if (temp & FDI_RX_BIT_LOCK)
1032 break;
1033 udelay(200);
1034 }
1035 if (j != tries)
1036 I915_WRITE(fdi_rx_iir_reg,
1037 temp | FDI_RX_BIT_LOCK);
1038 else
1039 DRM_DEBUG("train 1 fail\n");
1040 } else {
1041 I915_WRITE(fdi_rx_iir_reg,
1042 temp | FDI_RX_BIT_LOCK);
1043 DRM_DEBUG("train 1 ok 2!\n");
1044 }
1045 temp = I915_READ(fdi_tx_reg);
1046 temp &= ~FDI_LINK_TRAIN_NONE;
1047 temp |= FDI_LINK_TRAIN_PATTERN_2;
1048 I915_WRITE(fdi_tx_reg, temp);
1049
1050 temp = I915_READ(fdi_rx_reg);
1051 temp &= ~FDI_LINK_TRAIN_NONE;
1052 temp |= FDI_LINK_TRAIN_PATTERN_2;
1053 I915_WRITE(fdi_rx_reg, temp);
1054
1055 udelay(150);
1056
1057 temp = I915_READ(fdi_rx_iir_reg);
1058 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1059
1060 if ((temp & FDI_RX_SYMBOL_LOCK) == 0) {
1061 for (j = 0; j < tries; j++) {
1062 temp = I915_READ(fdi_rx_iir_reg);
1063 DRM_DEBUG("FDI_RX_IIR 0x%x\n", temp);
1064 if (temp & FDI_RX_SYMBOL_LOCK)
1065 break;
1066 udelay(200);
1067 }
1068 if (j != tries) {
1069 I915_WRITE(fdi_rx_iir_reg,
1070 temp | FDI_RX_SYMBOL_LOCK);
1071 DRM_DEBUG("train 2 ok 1!\n");
1072 } else
1073 DRM_DEBUG("train 2 fail\n");
1074 } else {
1075 I915_WRITE(fdi_rx_iir_reg, temp | FDI_RX_SYMBOL_LOCK);
1076 DRM_DEBUG("train 2 ok 2!\n");
1077 }
1078 DRM_DEBUG("train done\n");
1079
1080 /* set transcoder timing */
1081 I915_WRITE(trans_htot_reg, I915_READ(cpu_htot_reg));
1082 I915_WRITE(trans_hblank_reg, I915_READ(cpu_hblank_reg));
1083 I915_WRITE(trans_hsync_reg, I915_READ(cpu_hsync_reg));
1084
1085 I915_WRITE(trans_vtot_reg, I915_READ(cpu_vtot_reg));
1086 I915_WRITE(trans_vblank_reg, I915_READ(cpu_vblank_reg));
1087 I915_WRITE(trans_vsync_reg, I915_READ(cpu_vsync_reg));
1088
1089 /* enable PCH transcoder */
1090 temp = I915_READ(transconf_reg);
1091 I915_WRITE(transconf_reg, temp | TRANS_ENABLE);
1092 I915_READ(transconf_reg);
1093
1094 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) == 0)
1095 ;
1096
1097 /* enable normal */
1098
1099 temp = I915_READ(fdi_tx_reg);
1100 temp &= ~FDI_LINK_TRAIN_NONE;
1101 I915_WRITE(fdi_tx_reg, temp | FDI_LINK_TRAIN_NONE |
1102 FDI_TX_ENHANCE_FRAME_ENABLE);
1103 I915_READ(fdi_tx_reg);
1104
1105 temp = I915_READ(fdi_rx_reg);
1106 temp &= ~FDI_LINK_TRAIN_NONE;
1107 I915_WRITE(fdi_rx_reg, temp | FDI_LINK_TRAIN_NONE |
1108 FDI_RX_ENHANCE_FRAME_ENABLE);
1109 I915_READ(fdi_rx_reg);
1110
1111 /* wait one idle pattern time */
1112 udelay(100);
1113
1114 intel_crtc_load_lut(crtc);
1115
1116 break;
1117 case DRM_MODE_DPMS_OFF:
1118 DRM_DEBUG("crtc %d dpms off\n", pipe);
1119
1120 /* Disable the VGA plane that we never use */
1121 I915_WRITE(CPU_VGACNTRL, VGA_DISP_DISABLE);
1122
1123 /* Disable display plane */
1124 temp = I915_READ(dspcntr_reg);
1125 if ((temp & DISPLAY_PLANE_ENABLE) != 0) {
1126 I915_WRITE(dspcntr_reg, temp & ~DISPLAY_PLANE_ENABLE);
1127 /* Flush the plane changes */
1128 I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
1129 I915_READ(dspbase_reg);
1130 }
1131
1132 /* disable cpu pipe, disable after all planes disabled */
1133 temp = I915_READ(pipeconf_reg);
1134 if ((temp & PIPEACONF_ENABLE) != 0) {
1135 I915_WRITE(pipeconf_reg, temp & ~PIPEACONF_ENABLE);
1136 I915_READ(pipeconf_reg);
1137 /* wait for cpu pipe off, pipe state */
1138 while ((I915_READ(pipeconf_reg) & I965_PIPECONF_ACTIVE) != 0)
1139 ;
1140 } else
1141 DRM_DEBUG("crtc %d is disabled\n", pipe);
1142
1143 /* IGDNG-A : disable cpu panel fitter ? */
1144 temp = I915_READ(pf_ctl_reg);
1145 if ((temp & PF_ENABLE) != 0) {
1146 I915_WRITE(pf_ctl_reg, temp & ~PF_ENABLE);
1147 I915_READ(pf_ctl_reg);
1148 }
1149
1150 /* disable CPU FDI tx and PCH FDI rx */
1151 temp = I915_READ(fdi_tx_reg);
1152 I915_WRITE(fdi_tx_reg, temp & ~FDI_TX_ENABLE);
1153 I915_READ(fdi_tx_reg);
1154
1155 temp = I915_READ(fdi_rx_reg);
1156 I915_WRITE(fdi_rx_reg, temp & ~FDI_RX_ENABLE);
1157 I915_READ(fdi_rx_reg);
1158
1159 /* still set train pattern 1 */
1160 temp = I915_READ(fdi_tx_reg);
1161 temp &= ~FDI_LINK_TRAIN_NONE;
1162 temp |= FDI_LINK_TRAIN_PATTERN_1;
1163 I915_WRITE(fdi_tx_reg, temp);
1164
1165 temp = I915_READ(fdi_rx_reg);
1166 temp &= ~FDI_LINK_TRAIN_NONE;
1167 temp |= FDI_LINK_TRAIN_PATTERN_1;
1168 I915_WRITE(fdi_rx_reg, temp);
1169
1170 /* disable PCH transcoder */
1171 temp = I915_READ(transconf_reg);
1172 if ((temp & TRANS_ENABLE) != 0) {
1173 I915_WRITE(transconf_reg, temp & ~TRANS_ENABLE);
1174 I915_READ(transconf_reg);
1175 /* wait for PCH transcoder off, transcoder state */
1176 while ((I915_READ(transconf_reg) & TRANS_STATE_ENABLE) != 0)
1177 ;
1178 }
1179
1180 /* disable PCH DPLL */
1181 temp = I915_READ(pch_dpll_reg);
1182 if ((temp & DPLL_VCO_ENABLE) != 0) {
1183 I915_WRITE(pch_dpll_reg, temp & ~DPLL_VCO_ENABLE);
1184 I915_READ(pch_dpll_reg);
1185 }
1186
1187 temp = I915_READ(fdi_rx_reg);
1188 if ((temp & FDI_RX_PLL_ENABLE) != 0) {
1189 temp &= ~FDI_SEL_PCDCLK;
1190 temp &= ~FDI_RX_PLL_ENABLE;
1191 I915_WRITE(fdi_rx_reg, temp);
1192 I915_READ(fdi_rx_reg);
1193 }
1194
1195 /* Wait for the clocks to turn off. */
1196 udelay(150);
1197 break;
1198 }
1199}
1200
1201static void i9xx_crtc_dpms(struct drm_crtc *crtc, int mode)
797{ 1202{
798 struct drm_device *dev = crtc->dev; 1203 struct drm_device *dev = crtc->dev;
799 struct drm_i915_master_private *master_priv;
800 struct drm_i915_private *dev_priv = dev->dev_private; 1204 struct drm_i915_private *dev_priv = dev->dev_private;
801 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 1205 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
802 int pipe = intel_crtc->pipe; 1206 int pipe = intel_crtc->pipe;
@@ -805,7 +1209,6 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
805 int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR; 1209 int dspbase_reg = (pipe == 0) ? DSPAADDR : DSPBADDR;
806 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF; 1210 int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
807 u32 temp; 1211 u32 temp;
808 bool enabled;
809 1212
810 /* XXX: When our outputs are all unaware of DPMS modes other than off 1213 /* XXX: When our outputs are all unaware of DPMS modes other than off
811 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC. 1214 * and on, we should map those modes to DRM_MODE_DPMS_OFF in the CRTC.
@@ -890,6 +1293,26 @@ static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
890 udelay(150); 1293 udelay(150);
891 break; 1294 break;
892 } 1295 }
1296}
1297
1298/**
1299 * Sets the power management mode of the pipe and plane.
1300 *
1301 * This code should probably grow support for turning the cursor off and back
1302 * on appropriately at the same time as we're turning the pipe off/on.
1303 */
1304static void intel_crtc_dpms(struct drm_crtc *crtc, int mode)
1305{
1306 struct drm_device *dev = crtc->dev;
1307 struct drm_i915_master_private *master_priv;
1308 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1309 int pipe = intel_crtc->pipe;
1310 bool enabled;
1311
1312 if (IS_IGDNG(dev))
1313 igdng_crtc_dpms(crtc, mode);
1314 else
1315 i9xx_crtc_dpms(crtc, mode);
893 1316
894 if (!dev->primary->master) 1317 if (!dev->primary->master)
895 return; 1318 return;
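
For orientation, the DRM_MODE_DPMS_ON path taken by the new igdng_crtc_dpms() (shown in the hunks above) follows a fixed ordering; the outline below is a condensed sketch of that sequence, not driver code.

	/*
	 * igdng_crtc_dpms(), DRM_MODE_DPMS_ON, condensed:
	 *
	 *  1. enable the PCH DPLL (DPLL_VCO_ENABLE)
	 *  2. enable the PCH FDI RX PLL, select PCDCLK, 4 lanes; wait ~200us
	 *  3. enable the CPU FDI TX PLL; wait ~100us
	 *  4. enable the CPU pipe, then the display plane
	 *  5. enable FDI TX and RX with link training pattern 1,
	 *     poll FDI_RX_IIR for FDI_RX_BIT_LOCK
	 *  6. switch TX and RX to training pattern 2,
	 *     poll FDI_RX_IIR for FDI_RX_SYMBOL_LOCK
	 *  7. copy the CPU pipe timings into the PCH transcoder registers
	 *  8. enable the transcoder and wait for TRANS_STATE_ENABLE
	 *  9. set the FDI link back to FDI_LINK_TRAIN_NONE with enhanced framing
	 * 10. reload the palette via intel_crtc_load_lut()
	 *
	 * DRM_MODE_DPMS_OFF tears the chain down in roughly reverse order:
	 * plane, pipe, panel fitter, FDI TX/RX (back to pattern 1),
	 * transcoder, PCH DPLL, FDI RX PLL.
	 */
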
@@ -947,6 +1370,12 @@ static bool intel_crtc_mode_fixup(struct drm_crtc *crtc,
947 struct drm_display_mode *mode, 1370 struct drm_display_mode *mode,
948 struct drm_display_mode *adjusted_mode) 1371 struct drm_display_mode *adjusted_mode)
949{ 1372{
1373 struct drm_device *dev = crtc->dev;
1374 if (IS_IGDNG(dev)) {
1375 /* FDI link clock is fixed at 2.7G */
1376 if (mode->clock * 3 > 27000 * 4)
1377 return MODE_CLOCK_HIGH;
1378 }
950 return true; 1379 return true;
951} 1380}
952 1381
@@ -1030,6 +1459,48 @@ static int intel_panel_fitter_pipe (struct drm_device *dev)
1030 return 1; 1459 return 1;
1031} 1460}
1032 1461
1462struct fdi_m_n {
1463 u32 tu;
1464 u32 gmch_m;
1465 u32 gmch_n;
1466 u32 link_m;
1467 u32 link_n;
1468};
1469
1470static void
1471fdi_reduce_ratio(u32 *num, u32 *den)
1472{
1473 while (*num > 0xffffff || *den > 0xffffff) {
1474 *num >>= 1;
1475 *den >>= 1;
1476 }
1477}
1478
1479#define DATA_N 0x800000
1480#define LINK_N 0x80000
1481
1482static void
1483igdng_compute_m_n(int bytes_per_pixel, int nlanes,
1484 int pixel_clock, int link_clock,
1485 struct fdi_m_n *m_n)
1486{
1487 u64 temp;
1488
1489 m_n->tu = 64; /* default size */
1490
1491 temp = (u64) DATA_N * pixel_clock;
1492 temp = div_u64(temp, link_clock);
1493 m_n->gmch_m = (temp * bytes_per_pixel) / nlanes;
1494 m_n->gmch_n = DATA_N;
1495 fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
1496
1497 temp = (u64) LINK_N * pixel_clock;
1498 m_n->link_m = div_u64(temp, link_clock);
1499 m_n->link_n = LINK_N;
1500 fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
1501}
1502
1503
1033static int intel_crtc_mode_set(struct drm_crtc *crtc, 1504static int intel_crtc_mode_set(struct drm_crtc *crtc,
1034 struct drm_display_mode *mode, 1505 struct drm_display_mode *mode,
1035 struct drm_display_mode *adjusted_mode, 1506 struct drm_display_mode *adjusted_mode,
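
fdi_reduce_ratio() simply halves numerator and denominator together until both fit the 24-bit M/N register fields, and igdng_compute_m_n() derives two ratios: the data ratio (pixel bytes per lane relative to the link) and the link ratio (pixel clock over link clock). As a concrete check, here is a standalone sketch of the same arithmetic, assuming a 65 MHz (65000 kHz) pixel clock at 24 bpp over 4 lanes with the fixed 270000 kHz link clock used by this patch; it is an illustration, not driver code.

	#include <stdio.h>

	#define DATA_N 0x800000
	#define LINK_N 0x80000

	static void reduce_ratio(unsigned int *num, unsigned int *den)
	{
		/* halve both terms until they fit the 24-bit register fields */
		while (*num > 0xffffff || *den > 0xffffff) {
			*num >>= 1;
			*den >>= 1;
		}
	}

	int main(void)
	{
		unsigned int bpp = 3, lanes = 4;
		unsigned int pixel_clock = 65000, link_clock = 270000;
		unsigned int gmch_m, gmch_n = DATA_N, link_m, link_n = LINK_N;
		unsigned long long temp;

		temp = (unsigned long long)DATA_N * pixel_clock / link_clock;
		gmch_m = (unsigned int)(temp * bpp / lanes);
		reduce_ratio(&gmch_m, &gmch_n);

		temp = (unsigned long long)LINK_N * pixel_clock / link_clock;
		link_m = (unsigned int)temp;
		reduce_ratio(&link_m, &link_n);

		/* prints: data M/N = 1514609/8388608 (~0.1806)
		 *         link M/N = 126217/524288  (~0.2407, i.e. 65/270) */
		printf("data M/N = %u/%u, link M/N = %u/%u\n",
		       gmch_m, gmch_n, link_m, link_n);
		return 0;
	}
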
@@ -1063,6 +1534,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1063 struct drm_connector *connector; 1534 struct drm_connector *connector;
1064 const intel_limit_t *limit; 1535 const intel_limit_t *limit;
1065 int ret; 1536 int ret;
1537 struct fdi_m_n m_n = {0};
1538 int data_m1_reg = (pipe == 0) ? PIPEA_DATA_M1 : PIPEB_DATA_M1;
1539 int data_n1_reg = (pipe == 0) ? PIPEA_DATA_N1 : PIPEB_DATA_N1;
1540 int link_m1_reg = (pipe == 0) ? PIPEA_LINK_M1 : PIPEB_LINK_M1;
1541 int link_n1_reg = (pipe == 0) ? PIPEA_LINK_N1 : PIPEB_LINK_N1;
1542 int pch_fp_reg = (pipe == 0) ? PCH_FPA0 : PCH_FPB0;
1543 int pch_dpll_reg = (pipe == 0) ? PCH_DPLL_A : PCH_DPLL_B;
1544 int fdi_rx_reg = (pipe == 0) ? FDI_RXA_CTL : FDI_RXB_CTL;
1545 int lvds_reg = LVDS;
1546 u32 temp;
1547 int sdvo_pixel_multiply;
1066 1548
1067 drm_vblank_pre_modeset(dev, pipe); 1549 drm_vblank_pre_modeset(dev, pipe);
1068 1550
@@ -1101,6 +1583,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1101 DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000); 1583 DRM_DEBUG("using SSC reference clock of %d MHz\n", refclk / 1000);
1102 } else if (IS_I9XX(dev)) { 1584 } else if (IS_I9XX(dev)) {
1103 refclk = 96000; 1585 refclk = 96000;
1586 if (IS_IGDNG(dev))
1587 refclk = 120000; /* 120Mhz refclk */
1104 } else { 1588 } else {
1105 refclk = 48000; 1589 refclk = 48000;
1106 } 1590 }
@@ -1114,6 +1598,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1114 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock); 1598 ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
1115 if (!ok) { 1599 if (!ok) {
1116 DRM_ERROR("Couldn't find PLL settings for mode!\n"); 1600 DRM_ERROR("Couldn't find PLL settings for mode!\n");
1601 drm_vblank_post_modeset(dev, pipe);
1117 return -EINVAL; 1602 return -EINVAL;
1118 } 1603 }
1119 1604
@@ -1137,12 +1622,21 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1137 } 1622 }
1138 } 1623 }
1139 1624
1625 /* FDI link */
1626 if (IS_IGDNG(dev))
1627 igdng_compute_m_n(3, 4, /* lane num 4 */
1628 adjusted_mode->clock,
1629 270000, /* lane clock */
1630 &m_n);
1631
1140 if (IS_IGD(dev)) 1632 if (IS_IGD(dev))
1141 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2; 1633 fp = (1 << clock.n) << 16 | clock.m1 << 8 | clock.m2;
1142 else 1634 else
1143 fp = clock.n << 16 | clock.m1 << 8 | clock.m2; 1635 fp = clock.n << 16 | clock.m1 << 8 | clock.m2;
1144 1636
1145 dpll = DPLL_VGA_MODE_DIS; 1637 if (!IS_IGDNG(dev))
1638 dpll = DPLL_VGA_MODE_DIS;
1639
1146 if (IS_I9XX(dev)) { 1640 if (IS_I9XX(dev)) {
1147 if (is_lvds) 1641 if (is_lvds)
1148 dpll |= DPLLB_MODE_LVDS; 1642 dpll |= DPLLB_MODE_LVDS;
@@ -1150,17 +1644,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1150 dpll |= DPLLB_MODE_DAC_SERIAL; 1644 dpll |= DPLLB_MODE_DAC_SERIAL;
1151 if (is_sdvo) { 1645 if (is_sdvo) {
1152 dpll |= DPLL_DVO_HIGH_SPEED; 1646 dpll |= DPLL_DVO_HIGH_SPEED;
1153 if (IS_I945G(dev) || IS_I945GM(dev)) { 1647 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
1154 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 1648 if (IS_I945G(dev) || IS_I945GM(dev))
1155 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES; 1649 dpll |= (sdvo_pixel_multiply - 1) << SDVO_MULTIPLIER_SHIFT_HIRES;
1156 } 1650 else if (IS_IGDNG(dev))
1651 dpll |= (sdvo_pixel_multiply - 1) << PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT;
1157 } 1652 }
1158 1653
1159 /* compute bitmask from p1 value */ 1654 /* compute bitmask from p1 value */
1160 if (IS_IGD(dev)) 1655 if (IS_IGD(dev))
1161 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD; 1656 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT_IGD;
1162 else 1657 else {
1163 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT; 1658 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA01_P1_POST_DIV_SHIFT;
1659 /* also FPA1 */
1660 if (IS_IGDNG(dev))
1661 dpll |= (1 << (clock.p1 - 1)) << DPLL_FPA1_P1_POST_DIV_SHIFT;
1662 }
1164 switch (clock.p2) { 1663 switch (clock.p2) {
1165 case 5: 1664 case 5:
1166 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5; 1665 dpll |= DPLL_DAC_SERIAL_P2_CLOCK_DIV_5;
@@ -1175,7 +1674,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1175 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14; 1674 dpll |= DPLLB_LVDS_P2_CLOCK_DIV_14;
1176 break; 1675 break;
1177 } 1676 }
1178 if (IS_I965G(dev)) 1677 if (IS_I965G(dev) && !IS_IGDNG(dev))
1179 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT); 1678 dpll |= (6 << PLL_LOAD_PULSE_PHASE_SHIFT);
1180 } else { 1679 } else {
1181 if (is_lvds) { 1680 if (is_lvds) {
@@ -1207,10 +1706,14 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1207 /* Set up the display plane register */ 1706 /* Set up the display plane register */
1208 dspcntr = DISPPLANE_GAMMA_ENABLE; 1707 dspcntr = DISPPLANE_GAMMA_ENABLE;
1209 1708
1210 if (pipe == 0) 1709 /* IGDNG's plane is forced to pipe, bit 24 is to
1211 dspcntr |= DISPPLANE_SEL_PIPE_A; 1710 enable color space conversion */
1212 else 1711 if (!IS_IGDNG(dev)) {
1213 dspcntr |= DISPPLANE_SEL_PIPE_B; 1712 if (pipe == 0)
1713 dspcntr |= DISPPLANE_SEL_PIPE_A;
1714 else
1715 dspcntr |= DISPPLANE_SEL_PIPE_B;
1716 }
1214 1717
1215 if (pipe == 0 && !IS_I965G(dev)) { 1718 if (pipe == 0 && !IS_I965G(dev)) {
1216 /* Enable pixel doubling when the dot clock is > 90% of the (display) 1719 /* Enable pixel doubling when the dot clock is > 90% of the (display)
@@ -1231,12 +1734,17 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1231 1734
1232 1735
1233 /* Disable the panel fitter if it was on our pipe */ 1736 /* Disable the panel fitter if it was on our pipe */
1234 if (intel_panel_fitter_pipe(dev) == pipe) 1737 if (!IS_IGDNG(dev) && intel_panel_fitter_pipe(dev) == pipe)
1235 I915_WRITE(PFIT_CONTROL, 0); 1738 I915_WRITE(PFIT_CONTROL, 0);
1236 1739
1237 DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B'); 1740 DRM_DEBUG("Mode for pipe %c:\n", pipe == 0 ? 'A' : 'B');
1238 drm_mode_debug_printmodeline(mode); 1741 drm_mode_debug_printmodeline(mode);
1239 1742
1743 /* assign to IGDNG registers */
1744 if (IS_IGDNG(dev)) {
1745 fp_reg = pch_fp_reg;
1746 dpll_reg = pch_dpll_reg;
1747 }
1240 1748
1241 if (dpll & DPLL_VCO_ENABLE) { 1749 if (dpll & DPLL_VCO_ENABLE) {
1242 I915_WRITE(fp_reg, fp); 1750 I915_WRITE(fp_reg, fp);
@@ -1245,13 +1753,33 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1245 udelay(150); 1753 udelay(150);
1246 } 1754 }
1247 1755
1756 if (IS_IGDNG(dev)) {
1757 /* enable PCH clock reference source */
1758 /* XXX need to change the setting for other outputs */
1759 u32 temp;
1760 temp = I915_READ(PCH_DREF_CONTROL);
1761 temp &= ~DREF_NONSPREAD_SOURCE_MASK;
1762 temp |= DREF_NONSPREAD_CK505_ENABLE;
1763 temp &= ~DREF_SSC_SOURCE_MASK;
1764 temp |= DREF_SSC_SOURCE_ENABLE;
1765 temp &= ~DREF_SSC1_ENABLE;
1766 /* if no eDP, disable source output to CPU */
1767 temp &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
1768 temp |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
1769 I915_WRITE(PCH_DREF_CONTROL, temp);
1770 }
1771
1248 /* The LVDS pin pair needs to be on before the DPLLs are enabled. 1772 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
1249 * This is an exception to the general rule that mode_set doesn't turn 1773 * This is an exception to the general rule that mode_set doesn't turn
1250 * things on. 1774 * things on.
1251 */ 1775 */
1252 if (is_lvds) { 1776 if (is_lvds) {
1253 u32 lvds = I915_READ(LVDS); 1777 u32 lvds;
1778
1779 if (IS_IGDNG(dev))
1780 lvds_reg = PCH_LVDS;
1254 1781
1782 lvds = I915_READ(lvds_reg);
1255 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT; 1783 lvds |= LVDS_PORT_EN | LVDS_A0A2_CLKA_POWER_UP | LVDS_PIPEB_SELECT;
1256 /* Set the B0-B3 data pairs corresponding to whether we're going to 1784 /* Set the B0-B3 data pairs corresponding to whether we're going to
1257 * set the DPLLs for dual-channel mode or not. 1785 * set the DPLLs for dual-channel mode or not.
@@ -1266,8 +1794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1266 * panels behave in the two modes. 1794 * panels behave in the two modes.
1267 */ 1795 */
1268 1796
1269 I915_WRITE(LVDS, lvds); 1797 I915_WRITE(lvds_reg, lvds);
1270 I915_READ(LVDS); 1798 I915_READ(lvds_reg);
1271 } 1799 }
1272 1800
1273 I915_WRITE(fp_reg, fp); 1801 I915_WRITE(fp_reg, fp);
@@ -1276,8 +1804,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1276 /* Wait for the clocks to stabilize. */ 1804 /* Wait for the clocks to stabilize. */
1277 udelay(150); 1805 udelay(150);
1278 1806
1279 if (IS_I965G(dev)) { 1807 if (IS_I965G(dev) && !IS_IGDNG(dev)) {
1280 int sdvo_pixel_multiply = adjusted_mode->clock / mode->clock; 1808 sdvo_pixel_multiply = adjusted_mode->clock / mode->clock;
1281 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) | 1809 I915_WRITE(dpll_md_reg, (0 << DPLL_MD_UDI_DIVIDER_SHIFT) |
1282 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT)); 1810 ((sdvo_pixel_multiply - 1) << DPLL_MD_UDI_MULTIPLIER_SHIFT));
1283 } else { 1811 } else {
@@ -1303,9 +1831,25 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1303 /* pipesrc and dspsize control the size that is scaled from, which should 1831 /* pipesrc and dspsize control the size that is scaled from, which should
1304 * always be the user's requested size. 1832 * always be the user's requested size.
1305 */ 1833 */
1306 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) | (mode->hdisplay - 1)); 1834 if (!IS_IGDNG(dev)) {
1307 I915_WRITE(dsppos_reg, 0); 1835 I915_WRITE(dspsize_reg, ((mode->vdisplay - 1) << 16) |
1836 (mode->hdisplay - 1));
1837 I915_WRITE(dsppos_reg, 0);
1838 }
1308 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1)); 1839 I915_WRITE(pipesrc_reg, ((mode->hdisplay - 1) << 16) | (mode->vdisplay - 1));
1840
1841 if (IS_IGDNG(dev)) {
1842 I915_WRITE(data_m1_reg, TU_SIZE(m_n.tu) | m_n.gmch_m);
1843 I915_WRITE(data_n1_reg, TU_SIZE(m_n.tu) | m_n.gmch_n);
1844 I915_WRITE(link_m1_reg, m_n.link_m);
1845 I915_WRITE(link_n1_reg, m_n.link_n);
1846
1847 /* enable FDI RX PLL too */
1848 temp = I915_READ(fdi_rx_reg);
1849 I915_WRITE(fdi_rx_reg, temp | FDI_RX_PLL_ENABLE);
1850 udelay(200);
1851 }
1852
1309 I915_WRITE(pipeconf_reg, pipeconf); 1853 I915_WRITE(pipeconf_reg, pipeconf);
1310 I915_READ(pipeconf_reg); 1854 I915_READ(pipeconf_reg);
1311 1855
@@ -1315,12 +1859,9 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
1315 1859
1316 /* Flush the plane changes */ 1860 /* Flush the plane changes */
1317 ret = intel_pipe_set_base(crtc, x, y, old_fb); 1861 ret = intel_pipe_set_base(crtc, x, y, old_fb);
1318 if (ret != 0)
1319 return ret;
1320
1321 drm_vblank_post_modeset(dev, pipe); 1862 drm_vblank_post_modeset(dev, pipe);
1322 1863
1323 return 0; 1864 return ret;
1324} 1865}
1325 1866
1326/** Loads the palette/gamma unit for the CRTC with the prepared values */ 1867/** Loads the palette/gamma unit for the CRTC with the prepared values */
@@ -1336,6 +1877,11 @@ void intel_crtc_load_lut(struct drm_crtc *crtc)
1336 if (!crtc->enabled) 1877 if (!crtc->enabled)
1337 return; 1878 return;
1338 1879
1880 /* use legacy palette for IGDNG */
1881 if (IS_IGDNG(dev))
1882 palreg = (intel_crtc->pipe == 0) ? LGC_PALETTE_A :
1883 LGC_PALETTE_B;
1884
1339 for (i = 0; i < 256; i++) { 1885 for (i = 0; i < 256; i++) {
1340 I915_WRITE(palreg + 4 * i, 1886 I915_WRITE(palreg + 4 * i,
1341 (intel_crtc->lut_r[i] << 16) | 1887 (intel_crtc->lut_r[i] << 16) |
@@ -1357,7 +1903,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1357 int pipe = intel_crtc->pipe; 1903 int pipe = intel_crtc->pipe;
1358 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR; 1904 uint32_t control = (pipe == 0) ? CURACNTR : CURBCNTR;
1359 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE; 1905 uint32_t base = (pipe == 0) ? CURABASE : CURBBASE;
1360 uint32_t temp; 1906 uint32_t temp = I915_READ(control);
1361 size_t addr; 1907 size_t addr;
1362 int ret; 1908 int ret;
1363 1909
@@ -1366,7 +1912,12 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1366 /* if we want to turn off the cursor ignore width and height */ 1912 /* if we want to turn off the cursor ignore width and height */
1367 if (!handle) { 1913 if (!handle) {
1368 DRM_DEBUG("cursor off\n"); 1914 DRM_DEBUG("cursor off\n");
1369 temp = CURSOR_MODE_DISABLE; 1915 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
1916 temp &= ~(CURSOR_MODE | MCURSOR_GAMMA_ENABLE);
1917 temp |= CURSOR_MODE_DISABLE;
1918 } else {
1919 temp &= ~(CURSOR_ENABLE | CURSOR_GAMMA_ENABLE);
1920 }
1370 addr = 0; 1921 addr = 0;
1371 bo = NULL; 1922 bo = NULL;
1372 mutex_lock(&dev->struct_mutex); 1923 mutex_lock(&dev->struct_mutex);
@@ -1409,10 +1960,19 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
1409 addr = obj_priv->phys_obj->handle->busaddr; 1960 addr = obj_priv->phys_obj->handle->busaddr;
1410 } 1961 }
1411 1962
1412 temp = 0; 1963 if (!IS_I9XX(dev))
1413 /* set the pipe for the cursor */ 1964 I915_WRITE(CURSIZE, (height << 12) | width);
1414 temp |= (pipe << 28); 1965
1415 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE; 1966 /* Hooray for CUR*CNTR differences */
1967 if (IS_MOBILE(dev) || IS_I9XX(dev)) {
1968 temp &= ~(CURSOR_MODE | MCURSOR_PIPE_SELECT);
1969 temp |= CURSOR_MODE_64_ARGB_AX | MCURSOR_GAMMA_ENABLE;
1970 temp |= (pipe << 28); /* Connect to correct pipe */
1971 } else {
1972 temp &= ~(CURSOR_FORMAT_MASK);
1973 temp |= CURSOR_ENABLE;
1974 temp |= CURSOR_FORMAT_ARGB | CURSOR_GAMMA_ENABLE;
1975 }
1416 1976
1417 finish: 1977 finish:
1418 I915_WRITE(control, temp); 1978 I915_WRITE(control, temp);
@@ -1450,16 +2010,16 @@ static int intel_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
1450 uint32_t adder; 2010 uint32_t adder;
1451 2011
1452 if (x < 0) { 2012 if (x < 0) {
1453 temp |= (CURSOR_POS_SIGN << CURSOR_X_SHIFT); 2013 temp |= CURSOR_POS_SIGN << CURSOR_X_SHIFT;
1454 x = -x; 2014 x = -x;
1455 } 2015 }
1456 if (y < 0) { 2016 if (y < 0) {
1457 temp |= (CURSOR_POS_SIGN << CURSOR_Y_SHIFT); 2017 temp |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT;
1458 y = -y; 2018 y = -y;
1459 } 2019 }
1460 2020
1461 temp |= ((x & CURSOR_POS_MASK) << CURSOR_X_SHIFT); 2021 temp |= x << CURSOR_X_SHIFT;
1462 temp |= ((y & CURSOR_POS_MASK) << CURSOR_Y_SHIFT); 2022 temp |= y << CURSOR_Y_SHIFT;
1463 2023
1464 adder = intel_crtc->cursor_addr; 2024 adder = intel_crtc->cursor_addr;
1465 I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp); 2025 I915_WRITE((pipe == 0) ? CURAPOS : CURBPOS, temp);
@@ -1576,6 +2136,7 @@ struct drm_crtc *intel_get_load_detect_pipe(struct intel_output *intel_output,
1576 } 2136 }
1577 2137
1578 encoder->crtc = crtc; 2138 encoder->crtc = crtc;
2139 intel_output->base.encoder = encoder;
1579 intel_output->load_detect_temp = true; 2140 intel_output->load_detect_temp = true;
1580 2141
1581 intel_crtc = to_intel_crtc(crtc); 2142 intel_crtc = to_intel_crtc(crtc);
@@ -1611,6 +2172,7 @@ void intel_release_load_detect_pipe(struct intel_output *intel_output, int dpms_
1611 2172
1612 if (intel_output->load_detect_temp) { 2173 if (intel_output->load_detect_temp) {
1613 encoder->crtc = NULL; 2174 encoder->crtc = NULL;
2175 intel_output->base.encoder = NULL;
1614 intel_output->load_detect_temp = false; 2176 intel_output->load_detect_temp = false;
1615 crtc->enabled = drm_helper_crtc_in_use(crtc); 2177 crtc->enabled = drm_helper_crtc_in_use(crtc);
1616 drm_helper_disable_unused_functions(dev); 2178 drm_helper_disable_unused_functions(dev);
@@ -1748,6 +2310,8 @@ static void intel_crtc_destroy(struct drm_crtc *crtc)
1748{ 2310{
1749 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 2311 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
1750 2312
2313 if (intel_crtc->mode_set.mode)
2314 drm_mode_destroy(crtc->dev, intel_crtc->mode_set.mode);
1751 drm_crtc_cleanup(crtc); 2315 drm_crtc_cleanup(crtc);
1752 kfree(intel_crtc); 2316 kfree(intel_crtc);
1753} 2317}
@@ -1874,7 +2438,24 @@ static void intel_setup_outputs(struct drm_device *dev)
1874 if (IS_MOBILE(dev) && !IS_I830(dev)) 2438 if (IS_MOBILE(dev) && !IS_I830(dev))
1875 intel_lvds_init(dev); 2439 intel_lvds_init(dev);
1876 2440
1877 if (IS_I9XX(dev)) { 2441 if (IS_IGDNG(dev)) {
2442 int found;
2443
2444 if (I915_READ(HDMIB) & PORT_DETECTED) {
2445 /* check SDVOB */
2446 /* found = intel_sdvo_init(dev, HDMIB); */
2447 found = 0;
2448 if (!found)
2449 intel_hdmi_init(dev, HDMIB);
2450 }
2451
2452 if (I915_READ(HDMIC) & PORT_DETECTED)
2453 intel_hdmi_init(dev, HDMIC);
2454
2455 if (I915_READ(HDMID) & PORT_DETECTED)
2456 intel_hdmi_init(dev, HDMID);
2457
2458 } else if (IS_I9XX(dev)) {
1878 int found; 2459 int found;
1879 u32 reg; 2460 u32 reg;
1880 2461
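
On IGDNG each digital port exposes a PORT_DETECTED strap bit, so probing is a single register read per port; HDMIB is special only because the same port can carry an SDVO device, which is why the intel_sdvo_init() call is still stubbed out. A hypothetical table-driven form of the same probe (same behaviour minus the SDVOB check, assuming the driver's I915_READ()/PORT_DETECTED context):

	static const int pch_digital_ports[] = { HDMIB, HDMIC, HDMID };
	int i;

	for (i = 0; i < ARRAY_SIZE(pch_digital_ports); i++)
		if (I915_READ(pch_digital_ports[i]) & PORT_DETECTED)
			intel_hdmi_init(dev, pch_digital_ports[i]);
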
@@ -1898,7 +2479,7 @@ static void intel_setup_outputs(struct drm_device *dev)
1898 } else 2479 } else
1899 intel_dvo_init(dev); 2480 intel_dvo_init(dev);
1900 2481
1901 if (IS_I9XX(dev) && IS_MOBILE(dev)) 2482 if (IS_I9XX(dev) && IS_MOBILE(dev) && !IS_IGDNG(dev))
1902 intel_tv_init(dev); 2483 intel_tv_init(dev);
1903 2484
1904 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 2485 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index 8b8d6e65cd3f..1ee3007d6ec0 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -316,6 +316,7 @@ static const struct drm_encoder_helper_funcs intel_dvo_helper_funcs = {
316}; 316};
317 317
318static const struct drm_connector_funcs intel_dvo_connector_funcs = { 318static const struct drm_connector_funcs intel_dvo_connector_funcs = {
319 .dpms = drm_helper_connector_dpms,
319 .save = intel_dvo_save, 320 .save = intel_dvo_save,
320 .restore = intel_dvo_restore, 321 .restore = intel_dvo_restore,
321 .detect = intel_dvo_detect, 322 .detect = intel_dvo_detect,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index e4652dcdd9bb..0ecf6b76a401 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -207,7 +207,7 @@ static int intelfb_set_par(struct fb_info *info)
207 207
208 if (var->pixclock != -1) { 208 if (var->pixclock != -1) {
209 209
210 DRM_ERROR("PIXEL CLCOK SET\n"); 210 DRM_ERROR("PIXEL CLOCK SET\n");
211 return -EINVAL; 211 return -EINVAL;
212 } else { 212 } else {
213 struct drm_crtc *crtc; 213 struct drm_crtc *crtc;
@@ -674,8 +674,12 @@ static int intelfb_multi_fb_probe_crtc(struct drm_device *dev, struct drm_crtc *
674 par->crtc_ids[0] = crtc->base.id; 674 par->crtc_ids[0] = crtc->base.id;
675 675
676 modeset->num_connectors = conn_count; 676 modeset->num_connectors = conn_count;
677 if (modeset->mode != modeset->crtc->desired_mode) 677 if (modeset->crtc->desired_mode) {
678 modeset->mode = modeset->crtc->desired_mode; 678 if (modeset->mode)
679 drm_mode_destroy(dev, modeset->mode);
680 modeset->mode = drm_mode_duplicate(dev,
681 modeset->crtc->desired_mode);
682 }
679 683
680 par->crtc_count = 1; 684 par->crtc_count = 1;
681 685
@@ -824,8 +828,12 @@ static int intelfb_single_fb_probe(struct drm_device *dev)
824 par->crtc_ids[crtc_count++] = crtc->base.id; 828 par->crtc_ids[crtc_count++] = crtc->base.id;
825 829
826 modeset->num_connectors = conn_count; 830 modeset->num_connectors = conn_count;
827 if (modeset->mode != modeset->crtc->desired_mode) 831 if (modeset->crtc->desired_mode) {
828 modeset->mode = modeset->crtc->desired_mode; 832 if (modeset->mode)
833 drm_mode_destroy(dev, modeset->mode);
834 modeset->mode = drm_mode_duplicate(dev,
835 modeset->crtc->desired_mode);
836 }
829 } 837 }
830 par->crtc_count = crtc_count; 838 par->crtc_count = crtc_count;
831 839
@@ -857,9 +865,15 @@ void intelfb_restore(void)
857 drm_crtc_helper_set_config(&kernelfb_mode); 865 drm_crtc_helper_set_config(&kernelfb_mode);
858} 866}
859 867
868static void intelfb_restore_work_fn(struct work_struct *ignored)
869{
870 intelfb_restore();
871}
872static DECLARE_WORK(intelfb_restore_work, intelfb_restore_work_fn);
873
860static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3) 874static void intelfb_sysrq(int dummy1, struct tty_struct *dummy3)
861{ 875{
862 intelfb_restore(); 876 schedule_work(&intelfb_restore_work);
863} 877}
864 878
865static struct sysrq_key_op sysrq_intelfb_restore_op = { 879static struct sysrq_key_op sysrq_intelfb_restore_op = {
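
The sysrq change defers the actual restore to a workqueue because sysrq key handlers can run in interrupt/atomic context, while drm_crtc_helper_set_config() takes mutexes and may sleep. A generic sketch of the defer-to-process-context pattern used here:

	#include <linux/workqueue.h>

	static void heavy_body(struct work_struct *work)
	{
		/* may sleep: take locks, perform the modeset, ... */
	}
	static DECLARE_WORK(heavy_work, heavy_body);

	static void atomic_entry_point(void)
	{
		schedule_work(&heavy_work);	/* safe from atomic context */
	}
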
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index d0983bb93a18..4ea2a651b92c 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -56,7 +56,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
56 sdvox = SDVO_ENCODING_HDMI | 56 sdvox = SDVO_ENCODING_HDMI |
57 SDVO_BORDER_ENABLE | 57 SDVO_BORDER_ENABLE |
58 SDVO_VSYNC_ACTIVE_HIGH | 58 SDVO_VSYNC_ACTIVE_HIGH |
59 SDVO_HSYNC_ACTIVE_HIGH; 59 SDVO_HSYNC_ACTIVE_HIGH |
60 SDVO_NULL_PACKETS_DURING_VSYNC;
60 61
61 if (hdmi_priv->has_hdmi_sink) 62 if (hdmi_priv->has_hdmi_sink)
62 sdvox |= SDVO_AUDIO_ENABLE; 63 sdvox |= SDVO_AUDIO_ENABLE;
@@ -145,6 +146,22 @@ intel_hdmi_sink_detect(struct drm_connector *connector)
145} 146}
146 147
147static enum drm_connector_status 148static enum drm_connector_status
149igdng_hdmi_detect(struct drm_connector *connector)
150{
151 struct intel_output *intel_output = to_intel_output(connector);
152 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
153
154 /* FIXME hotplug detect */
155
156 hdmi_priv->has_hdmi_sink = false;
157 intel_hdmi_sink_detect(connector);
158 if (hdmi_priv->has_hdmi_sink)
159 return connector_status_connected;
160 else
161 return connector_status_disconnected;
162}
163
164static enum drm_connector_status
148intel_hdmi_detect(struct drm_connector *connector) 165intel_hdmi_detect(struct drm_connector *connector)
149{ 166{
150 struct drm_device *dev = connector->dev; 167 struct drm_device *dev = connector->dev;
@@ -153,6 +170,9 @@ intel_hdmi_detect(struct drm_connector *connector)
153 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv; 170 struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
154 u32 temp, bit; 171 u32 temp, bit;
155 172
173 if (IS_IGDNG(dev))
174 return igdng_hdmi_detect(connector);
175
156 temp = I915_READ(PORT_HOTPLUG_EN); 176 temp = I915_READ(PORT_HOTPLUG_EN);
157 177
158 switch (hdmi_priv->sdvox_reg) { 178 switch (hdmi_priv->sdvox_reg) {
@@ -219,6 +239,7 @@ static const struct drm_encoder_helper_funcs intel_hdmi_helper_funcs = {
219}; 239};
220 240
221static const struct drm_connector_funcs intel_hdmi_connector_funcs = { 241static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
242 .dpms = drm_helper_connector_dpms,
222 .save = intel_hdmi_save, 243 .save = intel_hdmi_save,
223 .restore = intel_hdmi_restore, 244 .restore = intel_hdmi_restore,
224 .detect = intel_hdmi_detect, 245 .detect = intel_hdmi_detect,
@@ -268,8 +289,17 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
268 /* Set up the DDC bus. */ 289 /* Set up the DDC bus. */
269 if (sdvox_reg == SDVOB) 290 if (sdvox_reg == SDVOB)
270 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB"); 291 intel_output->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
271 else 292 else if (sdvox_reg == SDVOC)
272 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC"); 293 intel_output->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
294 else if (sdvox_reg == HDMIB)
295 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
296 "HDMIB");
297 else if (sdvox_reg == HDMIC)
298 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
299 "HDMIC");
300 else if (sdvox_reg == HDMID)
301 intel_output->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
302 "HDMID");
273 303
274 if (!intel_output->ddc_bus) 304 if (!intel_output->ddc_bus)
275 goto err_connector; 305 goto err_connector;
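
On the PCH the HDMI ports use PCH GPIO pins for DDC, so intel_hdmi_init() now picks the GPIO register from the port register it was handed. The chain of else-ifs encodes a simple mapping; a hypothetical helper (not in the driver) makes it explicit:

	static u32 hdmi_ddc_gpio(int sdvox_reg)
	{
		switch (sdvox_reg) {
		case SDVOB: return GPIOE;	/* CPU-side ports */
		case SDVOC: return GPIOD;
		case HDMIB: return PCH_GPIOE;	/* PCH-side ports */
		case HDMIC: return PCH_GPIOD;
		case HDMID: return PCH_GPIOF;
		}
		return 0;
	}
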
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 439a86514993..f073ed8432e8 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -37,6 +37,8 @@
37#include "i915_drm.h" 37#include "i915_drm.h"
38#include "i915_drv.h" 38#include "i915_drv.h"
39 39
40#define I915_LVDS "i915_lvds"
41
40/** 42/**
41 * Sets the backlight level. 43 * Sets the backlight level.
42 * 44 *
@@ -45,10 +47,15 @@
45static void intel_lvds_set_backlight(struct drm_device *dev, int level) 47static void intel_lvds_set_backlight(struct drm_device *dev, int level)
46{ 48{
47 struct drm_i915_private *dev_priv = dev->dev_private; 49 struct drm_i915_private *dev_priv = dev->dev_private;
48 u32 blc_pwm_ctl; 50 u32 blc_pwm_ctl, reg;
51
52 if (IS_IGDNG(dev))
53 reg = BLC_PWM_CPU_CTL;
54 else
55 reg = BLC_PWM_CTL;
49 56
50 blc_pwm_ctl = I915_READ(BLC_PWM_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK; 57 blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
51 I915_WRITE(BLC_PWM_CTL, (blc_pwm_ctl | 58 I915_WRITE(reg, (blc_pwm_ctl |
52 (level << BACKLIGHT_DUTY_CYCLE_SHIFT))); 59 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
53} 60}
54 61
@@ -58,8 +65,14 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
58static u32 intel_lvds_get_max_backlight(struct drm_device *dev) 65static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
59{ 66{
60 struct drm_i915_private *dev_priv = dev->dev_private; 67 struct drm_i915_private *dev_priv = dev->dev_private;
68 u32 reg;
69
70 if (IS_IGDNG(dev))
71 reg = BLC_PWM_PCH_CTL2;
72 else
73 reg = BLC_PWM_CTL;
61 74
62 return ((I915_READ(BLC_PWM_CTL) & BACKLIGHT_MODULATION_FREQ_MASK) >> 75 return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
63 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2; 76 BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
64} 77}
65 78
@@ -69,23 +82,31 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
69static void intel_lvds_set_power(struct drm_device *dev, bool on) 82static void intel_lvds_set_power(struct drm_device *dev, bool on)
70{ 83{
71 struct drm_i915_private *dev_priv = dev->dev_private; 84 struct drm_i915_private *dev_priv = dev->dev_private;
72 u32 pp_status; 85 u32 pp_status, ctl_reg, status_reg;
86
87 if (IS_IGDNG(dev)) {
88 ctl_reg = PCH_PP_CONTROL;
89 status_reg = PCH_PP_STATUS;
90 } else {
91 ctl_reg = PP_CONTROL;
92 status_reg = PP_STATUS;
93 }
73 94
74 if (on) { 95 if (on) {
75 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) | 96 I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
76 POWER_TARGET_ON); 97 POWER_TARGET_ON);
77 do { 98 do {
78 pp_status = I915_READ(PP_STATUS); 99 pp_status = I915_READ(status_reg);
79 } while ((pp_status & PP_ON) == 0); 100 } while ((pp_status & PP_ON) == 0);
80 101
81 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle); 102 intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
82 } else { 103 } else {
83 intel_lvds_set_backlight(dev, 0); 104 intel_lvds_set_backlight(dev, 0);
84 105
85 I915_WRITE(PP_CONTROL, I915_READ(PP_CONTROL) & 106 I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
86 ~POWER_TARGET_ON); 107 ~POWER_TARGET_ON);
87 do { 108 do {
88 pp_status = I915_READ(PP_STATUS); 109 pp_status = I915_READ(status_reg);
89 } while (pp_status & PP_ON); 110 } while (pp_status & PP_ON);
90 } 111 }
91} 112}
@@ -106,12 +127,28 @@ static void intel_lvds_save(struct drm_connector *connector)
106{ 127{
107 struct drm_device *dev = connector->dev; 128 struct drm_device *dev = connector->dev;
108 struct drm_i915_private *dev_priv = dev->dev_private; 129 struct drm_i915_private *dev_priv = dev->dev_private;
130 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
131 u32 pwm_ctl_reg;
132
133 if (IS_IGDNG(dev)) {
134 pp_on_reg = PCH_PP_ON_DELAYS;
135 pp_off_reg = PCH_PP_OFF_DELAYS;
136 pp_ctl_reg = PCH_PP_CONTROL;
137 pp_div_reg = PCH_PP_DIVISOR;
138 pwm_ctl_reg = BLC_PWM_CPU_CTL;
139 } else {
140 pp_on_reg = PP_ON_DELAYS;
141 pp_off_reg = PP_OFF_DELAYS;
142 pp_ctl_reg = PP_CONTROL;
143 pp_div_reg = PP_DIVISOR;
144 pwm_ctl_reg = BLC_PWM_CTL;
145 }
109 146
110 dev_priv->savePP_ON = I915_READ(PP_ON_DELAYS); 147 dev_priv->savePP_ON = I915_READ(pp_on_reg);
111 dev_priv->savePP_OFF = I915_READ(PP_OFF_DELAYS); 148 dev_priv->savePP_OFF = I915_READ(pp_off_reg);
112 dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL); 149 dev_priv->savePP_CONTROL = I915_READ(pp_ctl_reg);
113 dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR); 150 dev_priv->savePP_DIVISOR = I915_READ(pp_div_reg);
114 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 151 dev_priv->saveBLC_PWM_CTL = I915_READ(pwm_ctl_reg);
115 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & 152 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
116 BACKLIGHT_DUTY_CYCLE_MASK); 153 BACKLIGHT_DUTY_CYCLE_MASK);
117 154
@@ -127,12 +164,28 @@ static void intel_lvds_restore(struct drm_connector *connector)
127{ 164{
128 struct drm_device *dev = connector->dev; 165 struct drm_device *dev = connector->dev;
129 struct drm_i915_private *dev_priv = dev->dev_private; 166 struct drm_i915_private *dev_priv = dev->dev_private;
167 u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
168 u32 pwm_ctl_reg;
169
170 if (IS_IGDNG(dev)) {
171 pp_on_reg = PCH_PP_ON_DELAYS;
172 pp_off_reg = PCH_PP_OFF_DELAYS;
173 pp_ctl_reg = PCH_PP_CONTROL;
174 pp_div_reg = PCH_PP_DIVISOR;
175 pwm_ctl_reg = BLC_PWM_CPU_CTL;
176 } else {
177 pp_on_reg = PP_ON_DELAYS;
178 pp_off_reg = PP_OFF_DELAYS;
179 pp_ctl_reg = PP_CONTROL;
180 pp_div_reg = PP_DIVISOR;
181 pwm_ctl_reg = BLC_PWM_CTL;
182 }
130 183
131 I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL); 184 I915_WRITE(pwm_ctl_reg, dev_priv->saveBLC_PWM_CTL);
132 I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON); 185 I915_WRITE(pp_on_reg, dev_priv->savePP_ON);
133 I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF); 186 I915_WRITE(pp_off_reg, dev_priv->savePP_OFF);
134 I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR); 187 I915_WRITE(pp_div_reg, dev_priv->savePP_DIVISOR);
135 I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL); 188 I915_WRITE(pp_ctl_reg, dev_priv->savePP_CONTROL);
136 if (dev_priv->savePP_CONTROL & POWER_TARGET_ON) 189 if (dev_priv->savePP_CONTROL & POWER_TARGET_ON)
137 intel_lvds_set_power(dev, true); 190 intel_lvds_set_power(dev, true);
138 else 191 else
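
Every LVDS path in this file now selects between the CPU-side and PCH-side panel-power registers by hand. A hypothetical consolidation (an assumption, not part of this patch) would pick the block once; the backlight PWM registers are left out because they are the one asymmetric case in this patch, where the duty cycle is written through BLC_PWM_CPU_CTL while the modulation frequency is read from BLC_PWM_PCH_CTL2:

	struct lvds_pp_regs {
		u32 pp_on, pp_off, pp_ctl, pp_div;
	};

	static void lvds_get_pp_regs(struct drm_device *dev, struct lvds_pp_regs *r)
	{
		if (IS_IGDNG(dev)) {
			r->pp_on  = PCH_PP_ON_DELAYS;
			r->pp_off = PCH_PP_OFF_DELAYS;
			r->pp_ctl = PCH_PP_CONTROL;
			r->pp_div = PCH_PP_DIVISOR;
		} else {
			r->pp_on  = PP_ON_DELAYS;
			r->pp_off = PP_OFF_DELAYS;
			r->pp_ctl = PP_CONTROL;
			r->pp_div = PP_DIVISOR;
		}
	}
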
@@ -216,8 +269,14 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
216{ 269{
217 struct drm_device *dev = encoder->dev; 270 struct drm_device *dev = encoder->dev;
218 struct drm_i915_private *dev_priv = dev->dev_private; 271 struct drm_i915_private *dev_priv = dev->dev_private;
272 u32 reg;
273
274 if (IS_IGDNG(dev))
275 reg = BLC_PWM_CPU_CTL;
276 else
277 reg = BLC_PWM_CTL;
219 278
220 dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL); 279 dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
221 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL & 280 dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
222 BACKLIGHT_DUTY_CYCLE_MASK); 281 BACKLIGHT_DUTY_CYCLE_MASK);
223 282
@@ -251,6 +310,10 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
251 * settings. 310 * settings.
252 */ 311 */
253 312
313 /* No panel fitting yet, fixme */
314 if (IS_IGDNG(dev))
315 return;
316
254 /* 317 /*
255 * Enable automatic panel scaling so that non-native modes fill the 318 * Enable automatic panel scaling so that non-native modes fill the
256 * screen. Should be enabled before the pipe is enabled, according to 319 * screen. Should be enabled before the pipe is enabled, according to
@@ -343,11 +406,6 @@ static int intel_lvds_set_property(struct drm_connector *connector,
343 struct drm_property *property, 406 struct drm_property *property,
344 uint64_t value) 407 uint64_t value)
345{ 408{
346 struct drm_device *dev = connector->dev;
347
348 if (property == dev->mode_config.dpms_property && connector->encoder)
349 intel_lvds_dpms(connector->encoder, (uint32_t)(value & 0xf));
350
351 return 0; 409 return 0;
352} 410}
353 411
@@ -366,6 +424,7 @@ static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs
366}; 424};
367 425
368static const struct drm_connector_funcs intel_lvds_connector_funcs = { 426static const struct drm_connector_funcs intel_lvds_connector_funcs = {
427 .dpms = drm_helper_connector_dpms,
369 .save = intel_lvds_save, 428 .save = intel_lvds_save,
370 .restore = intel_lvds_restore, 429 .restore = intel_lvds_restore,
371 .detect = intel_lvds_detect, 430 .detect = intel_lvds_detect,
@@ -386,12 +445,13 @@ static const struct drm_encoder_funcs intel_lvds_enc_funcs = {
386 445
387static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id) 446static int __init intel_no_lvds_dmi_callback(const struct dmi_system_id *id)
388{ 447{
389 DRM_DEBUG("Skipping LVDS initialization for %s\n", id->ident); 448 DRM_DEBUG_KMS(I915_LVDS,
449 "Skipping LVDS initialization for %s\n", id->ident);
390 return 1; 450 return 1;
391} 451}
392 452
393/* These systems claim to have LVDS, but really don't */ 453/* These systems claim to have LVDS, but really don't */
394static const struct dmi_system_id __initdata intel_no_lvds[] = { 454static const struct dmi_system_id intel_no_lvds[] = {
395 { 455 {
396 .callback = intel_no_lvds_dmi_callback, 456 .callback = intel_no_lvds_dmi_callback,
397 .ident = "Apple Mac Mini (Core series)", 457 .ident = "Apple Mac Mini (Core series)",
@@ -424,8 +484,21 @@ static const struct dmi_system_id __initdata intel_no_lvds[] = {
424 DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"), 484 DMI_MATCH(DMI_PRODUCT_NAME, "Studio Hybrid 140g"),
425 }, 485 },
426 }, 486 },
427 487 {
428 /* FIXME: add a check for the Aopen Mini PC */ 488 .callback = intel_no_lvds_dmi_callback,
489 .ident = "AOpen Mini PC",
490 .matches = {
491 DMI_MATCH(DMI_SYS_VENDOR, "AOpen"),
492 DMI_MATCH(DMI_PRODUCT_NAME, "i965GMx-IF"),
493 },
494 },
495 {
496 .callback = intel_no_lvds_dmi_callback,
497 .ident = "Aopen i945GTt-VFA",
498 .matches = {
499 DMI_MATCH(DMI_PRODUCT_VERSION, "AO00001JW"),
500 },
501 },
429 502
430 { } /* terminating entry */ 503 { } /* terminating entry */
431}; 504};
@@ -446,12 +519,18 @@ void intel_lvds_init(struct drm_device *dev)
446 struct drm_display_mode *scan; /* *modes, *bios_mode; */ 519 struct drm_display_mode *scan; /* *modes, *bios_mode; */
447 struct drm_crtc *crtc; 520 struct drm_crtc *crtc;
448 u32 lvds; 521 u32 lvds;
449 int pipe; 522 int pipe, gpio = GPIOC;
450 523
451 /* Skip init on machines we know falsely report LVDS */ 524 /* Skip init on machines we know falsely report LVDS */
452 if (dmi_check_system(intel_no_lvds)) 525 if (dmi_check_system(intel_no_lvds))
453 return; 526 return;
454 527
528 if (IS_IGDNG(dev)) {
529 if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
530 return;
531 gpio = PCH_GPIOC;
532 }
533
455 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL); 534 intel_output = kzalloc(sizeof(struct intel_output), GFP_KERNEL);
456 if (!intel_output) { 535 if (!intel_output) {
457 return; 536 return;
@@ -486,7 +565,7 @@ void intel_lvds_init(struct drm_device *dev)
486 */ 565 */
487 566
488 /* Set up the DDC bus. */ 567 /* Set up the DDC bus. */
489 intel_output->ddc_bus = intel_i2c_create(dev, GPIOC, "LVDSDDC_C"); 568 intel_output->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
490 if (!intel_output->ddc_bus) { 569 if (!intel_output->ddc_bus) {
491 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration " 570 dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
492 "failed.\n"); 571 "failed.\n");
@@ -511,10 +590,10 @@ void intel_lvds_init(struct drm_device *dev)
511 } 590 }
512 591
513 /* Failed to get EDID, what about VBT? */ 592 /* Failed to get EDID, what about VBT? */
514 if (dev_priv->vbt_mode) { 593 if (dev_priv->lfp_lvds_vbt_mode) {
515 mutex_lock(&dev->mode_config.mutex); 594 mutex_lock(&dev->mode_config.mutex);
516 dev_priv->panel_fixed_mode = 595 dev_priv->panel_fixed_mode =
517 drm_mode_duplicate(dev, dev_priv->vbt_mode); 596 drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
518 mutex_unlock(&dev->mode_config.mutex); 597 mutex_unlock(&dev->mode_config.mutex);
519 if (dev_priv->panel_fixed_mode) { 598 if (dev_priv->panel_fixed_mode) {
520 dev_priv->panel_fixed_mode->type |= 599 dev_priv->panel_fixed_mode->type |=
@@ -528,6 +607,11 @@ void intel_lvds_init(struct drm_device *dev)
528 * on. If so, assume that whatever is currently programmed is the 607 * on. If so, assume that whatever is currently programmed is the
529 * correct mode. 608 * correct mode.
530 */ 609 */
610
611 /* IGDNG: FIXME if still fail, not try pipe mode now */
612 if (IS_IGDNG(dev))
613 goto failed;
614
531 lvds = I915_READ(LVDS); 615 lvds = I915_READ(LVDS);
532 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0; 616 pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
533 crtc = intel_get_crtc_from_pipe(dev, pipe); 617 crtc = intel_get_crtc_from_pipe(dev, pipe);
@@ -546,11 +630,22 @@ void intel_lvds_init(struct drm_device *dev)
546 goto failed; 630 goto failed;
547 631
548out: 632out:
633 if (IS_IGDNG(dev)) {
634 u32 pwm;
635 /* make sure PWM is enabled */
636 pwm = I915_READ(BLC_PWM_CPU_CTL2);
637 pwm |= (PWM_ENABLE | PWM_PIPE_B);
638 I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
639
640 pwm = I915_READ(BLC_PWM_PCH_CTL1);
641 pwm |= PWM_PCH_ENABLE;
642 I915_WRITE(BLC_PWM_PCH_CTL1, pwm);
643 }
549 drm_sysfs_connector_add(connector); 644 drm_sysfs_connector_add(connector);
550 return; 645 return;
551 646
552failed: 647failed:
553 DRM_DEBUG("No LVDS modes found, disabling.\n"); 648 DRM_DEBUG_KMS(I915_LVDS, "No LVDS modes found, disabling.\n");
554 if (intel_output->ddc_bus) 649 if (intel_output->ddc_bus)
555 intel_i2c_destroy(intel_output->ddc_bus); 650 intel_i2c_destroy(intel_output->ddc_bus);
556 drm_connector_cleanup(connector); 651 drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 9913651c1e17..9a00adb3a508 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -36,7 +36,7 @@
36#include "intel_sdvo_regs.h" 36#include "intel_sdvo_regs.h"
37 37
38#undef SDVO_DEBUG 38#undef SDVO_DEBUG
39 39#define I915_SDVO "i915_sdvo"
40struct intel_sdvo_priv { 40struct intel_sdvo_priv {
41 struct intel_i2c_chan *i2c_bus; 41 struct intel_i2c_chan *i2c_bus;
42 int slaveaddr; 42 int slaveaddr;
@@ -69,6 +69,10 @@ struct intel_sdvo_priv {
69 * This is set if we treat the device as HDMI, instead of DVI. 69 * This is set if we treat the device as HDMI, instead of DVI.
70 */ 70 */
71 bool is_hdmi; 71 bool is_hdmi;
72 /**
73 * This is set if we detect output of sdvo device as LVDS.
74 */
75 bool is_lvds;
72 76
73 /** 77 /**
74 * Returned SDTV resolutions allowed for the current format, if the 78 * Returned SDTV resolutions allowed for the current format, if the
@@ -273,20 +277,21 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
273 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 277 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
274 int i; 278 int i;
275 279
276 printk(KERN_DEBUG "%s: W: %02X ", SDVO_NAME(sdvo_priv), cmd); 280 DRM_DEBUG_KMS(I915_SDVO, "%s: W: %02X ",
281 SDVO_NAME(sdvo_priv), cmd);
277 for (i = 0; i < args_len; i++) 282 for (i = 0; i < args_len; i++)
278 printk(KERN_DEBUG "%02X ", ((u8 *)args)[i]); 283 DRM_LOG_KMS("%02X ", ((u8 *)args)[i]);
279 for (; i < 8; i++) 284 for (; i < 8; i++)
280 printk(KERN_DEBUG " "); 285 DRM_LOG_KMS(" ");
281 for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) { 286 for (i = 0; i < sizeof(sdvo_cmd_names) / sizeof(sdvo_cmd_names[0]); i++) {
282 if (cmd == sdvo_cmd_names[i].cmd) { 287 if (cmd == sdvo_cmd_names[i].cmd) {
283 printk(KERN_DEBUG "(%s)", sdvo_cmd_names[i].name); 288 DRM_LOG_KMS("(%s)", sdvo_cmd_names[i].name);
284 break; 289 break;
285 } 290 }
286 } 291 }
287 if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0])) 292 if (i == sizeof(sdvo_cmd_names)/ sizeof(sdvo_cmd_names[0]))
288 printk(KERN_DEBUG "(%02X)", cmd); 293 DRM_LOG_KMS("(%02X)", cmd);
289 printk(KERN_DEBUG "\n"); 294 DRM_LOG_KMS("\n");
290} 295}
291#else 296#else
292#define intel_sdvo_debug_write(o, c, a, l) 297#define intel_sdvo_debug_write(o, c, a, l)
@@ -325,16 +330,16 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
325 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv; 330 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
326 int i; 331 int i;
327 332
328 printk(KERN_DEBUG "%s: R: ", SDVO_NAME(sdvo_priv)); 333 DRM_DEBUG_KMS(I915_SDVO, "%s: R: ", SDVO_NAME(sdvo_priv));
329 for (i = 0; i < response_len; i++) 334 for (i = 0; i < response_len; i++)
330 printk(KERN_DEBUG "%02X ", ((u8 *)response)[i]); 335 DRM_LOG_KMS("%02X ", ((u8 *)response)[i]);
331 for (; i < 8; i++) 336 for (; i < 8; i++)
332 printk(KERN_DEBUG " "); 337 DRM_LOG_KMS(" ");
333 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP) 338 if (status <= SDVO_CMD_STATUS_SCALING_NOT_SUPP)
334 printk(KERN_DEBUG "(%s)", cmd_status_names[status]); 339 DRM_LOG_KMS("(%s)", cmd_status_names[status]);
335 else 340 else
336 printk(KERN_DEBUG "(??? %d)", status); 341 DRM_LOG_KMS("(??? %d)", status);
337 printk(KERN_DEBUG "\n"); 342 DRM_LOG_KMS("\n");
338} 343}
339#else 344#else
340#define intel_sdvo_debug_response(o, r, l, s) 345#define intel_sdvo_debug_response(o, r, l, s)
@@ -1398,10 +1403,8 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
1398static void intel_sdvo_get_ddc_modes(struct drm_connector *connector) 1403static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
1399{ 1404{
1400 struct intel_output *intel_output = to_intel_output(connector); 1405 struct intel_output *intel_output = to_intel_output(connector);
1401 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1402 1406
1403 /* set the bus switch and get the modes */ 1407 /* set the bus switch and get the modes */
1404 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1405 intel_ddc_get_modes(intel_output); 1408 intel_ddc_get_modes(intel_output);
1406 1409
1407#if 0 1410#if 0
@@ -1543,6 +1546,37 @@ static void intel_sdvo_get_tv_modes(struct drm_connector *connector)
1543 } 1546 }
1544} 1547}
1545 1548
1549static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
1550{
1551 struct intel_output *intel_output = to_intel_output(connector);
1552 struct intel_sdvo_priv *sdvo_priv = intel_output->dev_priv;
1553 struct drm_i915_private *dev_priv = connector->dev->dev_private;
1554
1555 /*
1556 * Attempt to get the mode list from DDC.
1557 * Assume that the preferred modes are
1558 * arranged in priority order.
1559 */
1560 /* set the bus switch and get the modes */
1561 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1562 intel_ddc_get_modes(intel_output);
1563 if (list_empty(&connector->probed_modes) == false)
1564 return;
1565
1566 /* Fetch modes from VBT */
1567 if (dev_priv->sdvo_lvds_vbt_mode != NULL) {
1568 struct drm_display_mode *newmode;
1569 newmode = drm_mode_duplicate(connector->dev,
1570 dev_priv->sdvo_lvds_vbt_mode);
1571 if (newmode != NULL) {
1572 /* Guarantee the mode is preferred */
1573 newmode->type = (DRM_MODE_TYPE_PREFERRED |
1574 DRM_MODE_TYPE_DRIVER);
1575 drm_mode_probed_add(connector, newmode);
1576 }
1577 }
1578}
1579
1546static int intel_sdvo_get_modes(struct drm_connector *connector) 1580static int intel_sdvo_get_modes(struct drm_connector *connector)
1547{ 1581{
1548 struct intel_output *output = to_intel_output(connector); 1582 struct intel_output *output = to_intel_output(connector);
@@ -1550,6 +1584,8 @@ static int intel_sdvo_get_modes(struct drm_connector *connector)
1550 1584
1551 if (sdvo_priv->is_tv) 1585 if (sdvo_priv->is_tv)
1552 intel_sdvo_get_tv_modes(connector); 1586 intel_sdvo_get_tv_modes(connector);
1587 else if (sdvo_priv->is_lvds == true)
1588 intel_sdvo_get_lvds_modes(connector);
1553 else 1589 else
1554 intel_sdvo_get_ddc_modes(connector); 1590 intel_sdvo_get_ddc_modes(connector);
1555 1591
@@ -1564,6 +1600,9 @@ static void intel_sdvo_destroy(struct drm_connector *connector)
1564 1600
1565 if (intel_output->i2c_bus) 1601 if (intel_output->i2c_bus)
1566 intel_i2c_destroy(intel_output->i2c_bus); 1602 intel_i2c_destroy(intel_output->i2c_bus);
1603 if (intel_output->ddc_bus)
1604 intel_i2c_destroy(intel_output->ddc_bus);
1605
1567 drm_sysfs_connector_remove(connector); 1606 drm_sysfs_connector_remove(connector);
1568 drm_connector_cleanup(connector); 1607 drm_connector_cleanup(connector);
1569 kfree(intel_output); 1608 kfree(intel_output);
@@ -1578,6 +1617,7 @@ static const struct drm_encoder_helper_funcs intel_sdvo_helper_funcs = {
1578}; 1617};
1579 1618
1580static const struct drm_connector_funcs intel_sdvo_connector_funcs = { 1619static const struct drm_connector_funcs intel_sdvo_connector_funcs = {
1620 .dpms = drm_helper_connector_dpms,
1581 .save = intel_sdvo_save, 1621 .save = intel_sdvo_save,
1582 .restore = intel_sdvo_restore, 1622 .restore = intel_sdvo_restore,
1583 .detect = intel_sdvo_detect, 1623 .detect = intel_sdvo_detect,
@@ -1660,33 +1700,107 @@ intel_sdvo_get_digital_encoding_mode(struct intel_output *output)
1660 return true; 1700 return true;
1661} 1701}
1662 1702
1703static struct intel_output *
1704intel_sdvo_chan_to_intel_output(struct intel_i2c_chan *chan)
1705{
1706 struct drm_device *dev = chan->drm_dev;
1707 struct drm_connector *connector;
1708 struct intel_output *intel_output = NULL;
1709
1710 list_for_each_entry(connector,
1711 &dev->mode_config.connector_list, head) {
1712 if (to_intel_output(connector)->ddc_bus == chan) {
1713 intel_output = to_intel_output(connector);
1714 break;
1715 }
1716 }
1717 return intel_output;
1718}
1719
1720static int intel_sdvo_master_xfer(struct i2c_adapter *i2c_adap,
1721 struct i2c_msg msgs[], int num)
1722{
1723 struct intel_output *intel_output;
1724 struct intel_sdvo_priv *sdvo_priv;
1725 struct i2c_algo_bit_data *algo_data;
1726 struct i2c_algorithm *algo;
1727
1728 algo_data = (struct i2c_algo_bit_data *)i2c_adap->algo_data;
1729 intel_output =
1730 intel_sdvo_chan_to_intel_output(
1731 (struct intel_i2c_chan *)(algo_data->data));
1732 if (intel_output == NULL)
1733 return -EINVAL;
1734
1735 sdvo_priv = intel_output->dev_priv;
1736 algo = (struct i2c_algorithm *)intel_output->i2c_bus->adapter.algo;
1737
1738 intel_sdvo_set_control_bus_switch(intel_output, sdvo_priv->ddc_bus);
1739 return algo->master_xfer(i2c_adap, msgs, num);
1740}
1741
1742static struct i2c_algorithm intel_sdvo_i2c_bit_algo = {
1743 .master_xfer = intel_sdvo_master_xfer,
1744};
1745
1746static u8
1747intel_sdvo_get_slave_addr(struct drm_device *dev, int output_device)
1748{
1749 struct drm_i915_private *dev_priv = dev->dev_private;
1750 struct sdvo_device_mapping *my_mapping, *other_mapping;
1751
1752 if (output_device == SDVOB) {
1753 my_mapping = &dev_priv->sdvo_mappings[0];
1754 other_mapping = &dev_priv->sdvo_mappings[1];
1755 } else {
1756 my_mapping = &dev_priv->sdvo_mappings[1];
1757 other_mapping = &dev_priv->sdvo_mappings[0];
1758 }
1759
1760 /* If the BIOS described our SDVO device, take advantage of it. */
1761 if (my_mapping->slave_addr)
1762 return my_mapping->slave_addr;
1763
1764 /* If the BIOS only described a different SDVO device, use the
1765 * address that it isn't using.
1766 */
1767 if (other_mapping->slave_addr) {
1768 if (other_mapping->slave_addr == 0x70)
1769 return 0x72;
1770 else
1771 return 0x70;
1772 }
1773
1774 /* No SDVO device info is found for another DVO port,
1775 * so use mapping assumption we had before BIOS parsing.
1776 */
1777 if (output_device == SDVOB)
1778 return 0x70;
1779 else
1780 return 0x72;
1781}
1782
1663bool intel_sdvo_init(struct drm_device *dev, int output_device) 1783bool intel_sdvo_init(struct drm_device *dev, int output_device)
1664{ 1784{
1665 struct drm_connector *connector; 1785 struct drm_connector *connector;
1666 struct intel_output *intel_output; 1786 struct intel_output *intel_output;
1667 struct intel_sdvo_priv *sdvo_priv; 1787 struct intel_sdvo_priv *sdvo_priv;
1668 struct intel_i2c_chan *i2cbus = NULL; 1788 struct intel_i2c_chan *i2cbus = NULL;
1789 struct intel_i2c_chan *ddcbus = NULL;
1669 int connector_type; 1790 int connector_type;
1670 u8 ch[0x40]; 1791 u8 ch[0x40];
1671 int i; 1792 int i;
1672 int encoder_type, output_id; 1793 int encoder_type, output_id;
1794 u8 slave_addr;
1673 1795
1674 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL); 1796 intel_output = kcalloc(sizeof(struct intel_output)+sizeof(struct intel_sdvo_priv), 1, GFP_KERNEL);
1675 if (!intel_output) { 1797 if (!intel_output) {
1676 return false; 1798 return false;
1677 } 1799 }
1678 1800
1679 connector = &intel_output->base;
1680
1681 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
1682 DRM_MODE_CONNECTOR_Unknown);
1683 drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
1684 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1); 1801 sdvo_priv = (struct intel_sdvo_priv *)(intel_output + 1);
1685 intel_output->type = INTEL_OUTPUT_SDVO; 1802 intel_output->type = INTEL_OUTPUT_SDVO;
1686 1803
1687 connector->interlace_allowed = 0;
1688 connector->doublescan_allowed = 0;
1689
1690 /* setup the DDC bus. */ 1804 /* setup the DDC bus. */
1691 if (output_device == SDVOB) 1805 if (output_device == SDVOB)
1692 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB"); 1806 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOB");
@@ -1694,32 +1808,47 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1694 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC"); 1808 i2cbus = intel_i2c_create(dev, GPIOE, "SDVOCTRL_E for SDVOC");
1695 1809
1696 if (!i2cbus) 1810 if (!i2cbus)
1697 goto err_connector; 1811 goto err_inteloutput;
1698 1812
1813 slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
1699 sdvo_priv->i2c_bus = i2cbus; 1814 sdvo_priv->i2c_bus = i2cbus;
1700 1815
1701 if (output_device == SDVOB) { 1816 if (output_device == SDVOB) {
1702 output_id = 1; 1817 output_id = 1;
1703 sdvo_priv->i2c_bus->slave_addr = 0x38;
1704 } else { 1818 } else {
1705 output_id = 2; 1819 output_id = 2;
1706 sdvo_priv->i2c_bus->slave_addr = 0x39;
1707 } 1820 }
1708 1821 sdvo_priv->i2c_bus->slave_addr = slave_addr >> 1;
1709 sdvo_priv->output_device = output_device; 1822 sdvo_priv->output_device = output_device;
1710 intel_output->i2c_bus = i2cbus; 1823 intel_output->i2c_bus = i2cbus;
1711 intel_output->dev_priv = sdvo_priv; 1824 intel_output->dev_priv = sdvo_priv;
1712 1825
1713
1714 /* Read the regs to test if we can talk to the device */ 1826 /* Read the regs to test if we can talk to the device */
1715 for (i = 0; i < 0x40; i++) { 1827 for (i = 0; i < 0x40; i++) {
1716 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) { 1828 if (!intel_sdvo_read_byte(intel_output, i, &ch[i])) {
1717 DRM_DEBUG("No SDVO device found on SDVO%c\n", 1829 DRM_DEBUG_KMS(I915_SDVO,
1718 output_device == SDVOB ? 'B' : 'C'); 1830 "No SDVO device found on SDVO%c\n",
1831 output_device == SDVOB ? 'B' : 'C');
1719 goto err_i2c; 1832 goto err_i2c;
1720 } 1833 }
1721 } 1834 }
1722 1835
1836 /* setup the DDC bus. */
1837 if (output_device == SDVOB)
1838 ddcbus = intel_i2c_create(dev, GPIOE, "SDVOB DDC BUS");
1839 else
1840 ddcbus = intel_i2c_create(dev, GPIOE, "SDVOC DDC BUS");
1841
1842 if (ddcbus == NULL)
1843 goto err_i2c;
1844
1845 intel_sdvo_i2c_bit_algo.functionality =
1846 intel_output->i2c_bus->adapter.algo->functionality;
1847 ddcbus->adapter.algo = &intel_sdvo_i2c_bit_algo;
1848 intel_output->ddc_bus = ddcbus;
1849
1850	/* By default, sdvo lvds is false */
1851 sdvo_priv->is_lvds = false;
1723 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps); 1852 intel_sdvo_get_capabilities(intel_output, &sdvo_priv->caps);
1724 1853
1725 if (sdvo_priv->caps.output_flags & 1854 if (sdvo_priv->caps.output_flags &
@@ -1729,7 +1858,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1729 else 1858 else
1730 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1; 1859 sdvo_priv->controlled_output = SDVO_OUTPUT_TMDS1;
1731 1860
1732 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1733 encoder_type = DRM_MODE_ENCODER_TMDS; 1861 encoder_type = DRM_MODE_ENCODER_TMDS;
1734 connector_type = DRM_MODE_CONNECTOR_DVID; 1862 connector_type = DRM_MODE_CONNECTOR_DVID;
1735 1863
@@ -1747,7 +1875,6 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1747 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0) 1875 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_SVID0)
1748 { 1876 {
1749 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0; 1877 sdvo_priv->controlled_output = SDVO_OUTPUT_SVID0;
1750 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1751 encoder_type = DRM_MODE_ENCODER_TVDAC; 1878 encoder_type = DRM_MODE_ENCODER_TVDAC;
1752 connector_type = DRM_MODE_CONNECTOR_SVIDEO; 1879 connector_type = DRM_MODE_CONNECTOR_SVIDEO;
1753 sdvo_priv->is_tv = true; 1880 sdvo_priv->is_tv = true;
@@ -1756,30 +1883,28 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1756 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0) 1883 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB0)
1757 { 1884 {
1758 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0; 1885 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB0;
1759 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1760 encoder_type = DRM_MODE_ENCODER_DAC; 1886 encoder_type = DRM_MODE_ENCODER_DAC;
1761 connector_type = DRM_MODE_CONNECTOR_VGA; 1887 connector_type = DRM_MODE_CONNECTOR_VGA;
1762 } 1888 }
1763 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1) 1889 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_RGB1)
1764 { 1890 {
1765 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1; 1891 sdvo_priv->controlled_output = SDVO_OUTPUT_RGB1;
1766 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1767 encoder_type = DRM_MODE_ENCODER_DAC; 1892 encoder_type = DRM_MODE_ENCODER_DAC;
1768 connector_type = DRM_MODE_CONNECTOR_VGA; 1893 connector_type = DRM_MODE_CONNECTOR_VGA;
1769 } 1894 }
1770 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0) 1895 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS0)
1771 { 1896 {
1772 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0; 1897 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS0;
1773 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1774 encoder_type = DRM_MODE_ENCODER_LVDS; 1898 encoder_type = DRM_MODE_ENCODER_LVDS;
1775 connector_type = DRM_MODE_CONNECTOR_LVDS; 1899 connector_type = DRM_MODE_CONNECTOR_LVDS;
1900 sdvo_priv->is_lvds = true;
1776 } 1901 }
1777 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1) 1902 else if (sdvo_priv->caps.output_flags & SDVO_OUTPUT_LVDS1)
1778 { 1903 {
1779 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1; 1904 sdvo_priv->controlled_output = SDVO_OUTPUT_LVDS1;
1780 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1781 encoder_type = DRM_MODE_ENCODER_LVDS; 1905 encoder_type = DRM_MODE_ENCODER_LVDS;
1782 connector_type = DRM_MODE_CONNECTOR_LVDS; 1906 connector_type = DRM_MODE_CONNECTOR_LVDS;
1907 sdvo_priv->is_lvds = true;
1783 } 1908 }
1784 else 1909 else
1785 { 1910 {
@@ -1787,17 +1912,25 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1787 1912
1788 sdvo_priv->controlled_output = 0; 1913 sdvo_priv->controlled_output = 0;
1789 memcpy (bytes, &sdvo_priv->caps.output_flags, 2); 1914 memcpy (bytes, &sdvo_priv->caps.output_flags, 2);
1790 DRM_DEBUG("%s: Unknown SDVO output type (0x%02x%02x)\n", 1915 DRM_DEBUG_KMS(I915_SDVO,
1791 SDVO_NAME(sdvo_priv), 1916 "%s: Unknown SDVO output type (0x%02x%02x)\n",
1792 bytes[0], bytes[1]); 1917 SDVO_NAME(sdvo_priv),
1918 bytes[0], bytes[1]);
1793 encoder_type = DRM_MODE_ENCODER_NONE; 1919 encoder_type = DRM_MODE_ENCODER_NONE;
1794 connector_type = DRM_MODE_CONNECTOR_Unknown; 1920 connector_type = DRM_MODE_CONNECTOR_Unknown;
1795 goto err_i2c; 1921 goto err_i2c;
1796 } 1922 }
1797 1923
1924 connector = &intel_output->base;
1925 drm_connector_init(dev, connector, &intel_sdvo_connector_funcs,
1926 connector_type);
1927 drm_connector_helper_add(connector, &intel_sdvo_connector_helper_funcs);
1928 connector->interlace_allowed = 0;
1929 connector->doublescan_allowed = 0;
1930 connector->display_info.subpixel_order = SubPixelHorizontalRGB;
1931
1798 drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type); 1932 drm_encoder_init(dev, &intel_output->enc, &intel_sdvo_enc_funcs, encoder_type);
1799 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs); 1933 drm_encoder_helper_add(&intel_output->enc, &intel_sdvo_helper_funcs);
1800 connector->connector_type = connector_type;
1801 1934
1802 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc); 1935 drm_mode_connector_attach_encoder(&intel_output->base, &intel_output->enc);
1803 drm_sysfs_connector_add(connector); 1936 drm_sysfs_connector_add(connector);
@@ -1812,31 +1945,30 @@ bool intel_sdvo_init(struct drm_device *dev, int output_device)
1812 &sdvo_priv->pixel_clock_max); 1945 &sdvo_priv->pixel_clock_max);
1813 1946
1814 1947
1815 DRM_DEBUG("%s device VID/DID: %02X:%02X.%02X, " 1948 DRM_DEBUG_KMS(I915_SDVO, "%s device VID/DID: %02X:%02X.%02X, "
1816 "clock range %dMHz - %dMHz, " 1949 "clock range %dMHz - %dMHz, "
1817 "input 1: %c, input 2: %c, " 1950 "input 1: %c, input 2: %c, "
1818 "output 1: %c, output 2: %c\n", 1951 "output 1: %c, output 2: %c\n",
1819 SDVO_NAME(sdvo_priv), 1952 SDVO_NAME(sdvo_priv),
1820 sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id, 1953 sdvo_priv->caps.vendor_id, sdvo_priv->caps.device_id,
1821 sdvo_priv->caps.device_rev_id, 1954 sdvo_priv->caps.device_rev_id,
1822 sdvo_priv->pixel_clock_min / 1000, 1955 sdvo_priv->pixel_clock_min / 1000,
1823 sdvo_priv->pixel_clock_max / 1000, 1956 sdvo_priv->pixel_clock_max / 1000,
1824 (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N', 1957 (sdvo_priv->caps.sdvo_inputs_mask & 0x1) ? 'Y' : 'N',
1825 (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N', 1958 (sdvo_priv->caps.sdvo_inputs_mask & 0x2) ? 'Y' : 'N',
1826 /* check currently supported outputs */ 1959 /* check currently supported outputs */
1827 sdvo_priv->caps.output_flags & 1960 sdvo_priv->caps.output_flags &
1828 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N', 1961 (SDVO_OUTPUT_TMDS0 | SDVO_OUTPUT_RGB0) ? 'Y' : 'N',
1829 sdvo_priv->caps.output_flags & 1962 sdvo_priv->caps.output_flags &
1830 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N'); 1963 (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 'Y' : 'N');
1831 1964
1832 intel_output->ddc_bus = i2cbus;
1833
1834 return true; 1965 return true;
1835 1966
1836err_i2c: 1967err_i2c:
1968 if (ddcbus != NULL)
1969 intel_i2c_destroy(intel_output->ddc_bus);
1837 intel_i2c_destroy(intel_output->i2c_bus); 1970 intel_i2c_destroy(intel_output->i2c_bus);
1838err_connector: 1971err_inteloutput:
1839 drm_connector_cleanup(connector);
1840 kfree(intel_output); 1972 kfree(intel_output);
1841 1973
1842 return false; 1974 return false;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index d2c32983242d..50d7ed70b338 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1392,6 +1392,9 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
1392 tv_ctl &= ~TV_TEST_MODE_MASK; 1392 tv_ctl &= ~TV_TEST_MODE_MASK;
1393 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT; 1393 tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
1394 tv_dac &= ~TVDAC_SENSE_MASK; 1394 tv_dac &= ~TVDAC_SENSE_MASK;
1395 tv_dac &= ~DAC_A_MASK;
1396 tv_dac &= ~DAC_B_MASK;
1397 tv_dac &= ~DAC_C_MASK;
1395 tv_dac |= (TVDAC_STATE_CHG_EN | 1398 tv_dac |= (TVDAC_STATE_CHG_EN |
1396 TVDAC_A_SENSE_CTL | 1399 TVDAC_A_SENSE_CTL |
1397 TVDAC_B_SENSE_CTL | 1400 TVDAC_B_SENSE_CTL |
@@ -1626,6 +1629,7 @@ static const struct drm_encoder_helper_funcs intel_tv_helper_funcs = {
1626}; 1629};
1627 1630
1628static const struct drm_connector_funcs intel_tv_connector_funcs = { 1631static const struct drm_connector_funcs intel_tv_connector_funcs = {
1632 .dpms = drm_helper_connector_dpms,
1629 .save = intel_tv_save, 1633 .save = intel_tv_save,
1630 .restore = intel_tv_restore, 1634 .restore = intel_tv_restore,
1631 .detect = intel_tv_detect, 1635 .detect = intel_tv_detect,
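
The three lines added to intel_tv_detect_type() follow the usual read-modify-write discipline for hardware registers: clear every field that is about to be programmed before OR-ing in the new value, so bits left over from a previous detect pass cannot survive. A stand-alone illustration of the pattern, with made-up field masks:

    #include <assert.h>
    #include <stdint.h>

    #define FIELD_A_MASK  0x00f0u   /* hypothetical field masks, not i915 registers */
    #define FIELD_B_MASK  0x000fu

    static uint32_t program_fields(uint32_t reg, uint32_t a, uint32_t b)
    {
            reg &= ~FIELD_A_MASK;   /* clear before setting, like the DAC masks above */
            reg &= ~FIELD_B_MASK;
            reg |= (a << 4) & FIELD_A_MASK;
            reg |= b & FIELD_B_MASK;
            return reg;
    }

    int main(void)
    {
            /* starting from a stale value of 0xffff, only the requested
             * field contents survive once the clears are in place */
            assert(program_fields(0xffffu, 0x3, 0x5) == 0xff35u);
            return 0;
    }
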
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index bc9d09dfa8e7..146f3570af8e 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -478,26 +478,27 @@ static void r700_cp_load_microcode(drm_radeon_private_t *dev_priv)
478 478
479 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)) { 479 if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV770)) {
480 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 480 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
481 DRM_INFO("Loading RV770 PFP Microcode\n"); 481 DRM_INFO("Loading RV770/RV790 PFP Microcode\n");
482 for (i = 0; i < R700_PFP_UCODE_SIZE; i++) 482 for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
483 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV770_pfp_microcode[i]); 483 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV770_pfp_microcode[i]);
484 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 484 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
485 485
486 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 486 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
487 DRM_INFO("Loading RV770 CP Microcode\n"); 487 DRM_INFO("Loading RV770/RV790 CP Microcode\n");
488 for (i = 0; i < R700_PM4_UCODE_SIZE; i++) 488 for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
489 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV770_cp_microcode[i]); 489 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV770_cp_microcode[i]);
490 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 490 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
491 491
492 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV730)) { 492 } else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV730) ||
493 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV740)) {
493 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 494 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
494 DRM_INFO("Loading RV730 PFP Microcode\n"); 495 DRM_INFO("Loading RV730/RV740 PFP Microcode\n");
495 for (i = 0; i < R700_PFP_UCODE_SIZE; i++) 496 for (i = 0; i < R700_PFP_UCODE_SIZE; i++)
496 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV730_pfp_microcode[i]); 497 RADEON_WRITE(R600_CP_PFP_UCODE_DATA, RV730_pfp_microcode[i]);
497 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0); 498 RADEON_WRITE(R600_CP_PFP_UCODE_ADDR, 0);
498 499
499 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 500 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
500 DRM_INFO("Loading RV730 CP Microcode\n"); 501 DRM_INFO("Loading RV730/RV740 CP Microcode\n");
501 for (i = 0; i < R700_PM4_UCODE_SIZE; i++) 502 for (i = 0; i < R700_PM4_UCODE_SIZE; i++)
502 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV730_cp_microcode[i]); 503 RADEON_WRITE(R600_CP_ME_RAM_DATA, RV730_cp_microcode[i]);
503 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0); 504 RADEON_WRITE(R600_CP_ME_RAM_WADDR, 0);
@@ -1324,6 +1325,10 @@ static void r700_gfx_init(struct drm_device *dev,
1324 dev_priv->r700_sc_prim_fifo_size = 0xf9; 1325 dev_priv->r700_sc_prim_fifo_size = 0xf9;
1325 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; 1326 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
1326 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; 1327 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
1328 if (dev_priv->r600_sx_max_export_pos_size > 16) {
1329 dev_priv->r600_sx_max_export_pos_size -= 16;
1330 dev_priv->r600_sx_max_export_smx_size += 16;
1331 }
1327 break; 1332 break;
1328 case CHIP_RV710: 1333 case CHIP_RV710:
1329 dev_priv->r600_max_pipes = 2; 1334 dev_priv->r600_max_pipes = 2;
@@ -1345,6 +1350,31 @@ static void r700_gfx_init(struct drm_device *dev,
1345 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30; 1350 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
1346 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130; 1351 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
1347 break; 1352 break;
1353 case CHIP_RV740:
1354 dev_priv->r600_max_pipes = 4;
1355 dev_priv->r600_max_tile_pipes = 4;
1356 dev_priv->r600_max_simds = 8;
1357 dev_priv->r600_max_backends = 4;
1358 dev_priv->r600_max_gprs = 256;
1359 dev_priv->r600_max_threads = 248;
1360 dev_priv->r600_max_stack_entries = 512;
1361 dev_priv->r600_max_hw_contexts = 8;
1362 dev_priv->r600_max_gs_threads = 16 * 2;
1363 dev_priv->r600_sx_max_export_size = 256;
1364 dev_priv->r600_sx_max_export_pos_size = 32;
1365 dev_priv->r600_sx_max_export_smx_size = 224;
1366 dev_priv->r600_sq_num_cf_insts = 2;
1367
1368 dev_priv->r700_sx_num_of_sets = 7;
1369 dev_priv->r700_sc_prim_fifo_size = 0x100;
1370 dev_priv->r700_sc_hiz_tile_fifo_size = 0x30;
1371 dev_priv->r700_sc_earlyz_tile_fifo_fize = 0x130;
1372
1373 if (dev_priv->r600_sx_max_export_pos_size > 16) {
1374 dev_priv->r600_sx_max_export_pos_size -= 16;
1375 dev_priv->r600_sx_max_export_smx_size += 16;
1376 }
1377 break;
1348 default: 1378 default:
1349 break; 1379 break;
1350 } 1380 }
@@ -1493,6 +1523,7 @@ static void r700_gfx_init(struct drm_device *dev,
1493 break; 1523 break;
1494 case CHIP_RV730: 1524 case CHIP_RV730:
1495 case CHIP_RV710: 1525 case CHIP_RV710:
1526 case CHIP_RV740:
1496 default: 1527 default:
1497 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4); 1528 sq_ms_fifo_sizes |= R600_FETCH_FIFO_HIWATER(0x4);
1498 break; 1529 break;
@@ -1569,6 +1600,7 @@ static void r700_gfx_init(struct drm_device *dev,
1569 switch (dev_priv->flags & RADEON_FAMILY_MASK) { 1600 switch (dev_priv->flags & RADEON_FAMILY_MASK) {
1570 case CHIP_RV770: 1601 case CHIP_RV770:
1571 case CHIP_RV730: 1602 case CHIP_RV730:
1603 case CHIP_RV740:
1572 gs_prim_buffer_depth = 384; 1604 gs_prim_buffer_depth = 384;
1573 break; 1605 break;
1574 case CHIP_RV710: 1606 case CHIP_RV710:
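
Both gfx-init hunks above add the same SX export rebalancing: when more than 16 position-export slots are configured, 16 of them are moved over to the SMX export pool. With the RV740 numbers shown, 32/224 becomes 16/240, while a part already at 16 position slots is left untouched. A trivial model of that arithmetic (values other than the RV740 ones are illustrative):

    #include <assert.h>

    /* Models the rebalancing applied in r700_gfx_init(): shift 16 slots
     * from the position-export pool to the SMX-export pool when possible. */
    static void rebalance(int *pos, int *smx)
    {
            if (*pos > 16) {
                    *pos -= 16;
                    *smx += 16;
            }
    }

    int main(void)
    {
            int pos = 32, smx = 224;        /* RV740 defaults from the hunk above */
            rebalance(&pos, &smx);
            assert(pos == 16 && smx == 240);

            pos = 16; smx = 176;            /* a part already at the floor (hypothetical smx) */
            rebalance(&pos, &smx);
            assert(pos == 16 && smx == 176);
            return 0;
    }
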
diff --git a/drivers/gpu/drm/radeon/radeon_cp.c b/drivers/gpu/drm/radeon/radeon_cp.c
index 77a7a4d84650..89c4c44169f7 100644
--- a/drivers/gpu/drm/radeon/radeon_cp.c
+++ b/drivers/gpu/drm/radeon/radeon_cp.c
@@ -2109,7 +2109,7 @@ int radeon_master_create(struct drm_device *dev, struct drm_master *master)
2109 2109
2110 /* prebuild the SAREA */ 2110 /* prebuild the SAREA */
2111 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE); 2111 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
2112 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK|_DRM_DRIVER, 2112 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM, _DRM_CONTAINS_LOCK,
2113 &master_priv->sarea); 2113 &master_priv->sarea);
2114 if (ret) { 2114 if (ret) {
2115 DRM_ERROR("SAREA setup failed\n"); 2115 DRM_ERROR("SAREA setup failed\n");
@@ -2185,9 +2185,9 @@ void radeon_commit_ring(drm_radeon_private_t *dev_priv)
2185 2185
2186 /* check if the ring is padded out to 16-dword alignment */ 2186 /* check if the ring is padded out to 16-dword alignment */
2187 2187
2188 tail_aligned = dev_priv->ring.tail & 0xf; 2188 tail_aligned = dev_priv->ring.tail & (RADEON_RING_ALIGN-1);
2189 if (tail_aligned) { 2189 if (tail_aligned) {
2190 int num_p2 = 16 - tail_aligned; 2190 int num_p2 = RADEON_RING_ALIGN - tail_aligned;
2191 2191
2192 ring = dev_priv->ring.start; 2192 ring = dev_priv->ring.start;
2193 /* pad with some CP_PACKET2 */ 2193 /* pad with some CP_PACKET2 */
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index 8071d965f142..127d0456f628 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -146,6 +146,7 @@ enum radeon_family {
146 CHIP_RV770, 146 CHIP_RV770,
147 CHIP_RV730, 147 CHIP_RV730,
148 CHIP_RV710, 148 CHIP_RV710,
149 CHIP_RV740,
149 CHIP_LAST, 150 CHIP_LAST,
150}; 151};
151 152
@@ -1964,11 +1965,14 @@ do { \
1964 1965
1965#define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring; 1966#define RING_LOCALS int write, _nr, _align_nr; unsigned int mask; u32 *ring;
1966 1967
1968#define RADEON_RING_ALIGN 16
1969
1967#define BEGIN_RING( n ) do { \ 1970#define BEGIN_RING( n ) do { \
1968 if ( RADEON_VERBOSE ) { \ 1971 if ( RADEON_VERBOSE ) { \
1969 DRM_INFO( "BEGIN_RING( %d )\n", (n)); \ 1972 DRM_INFO( "BEGIN_RING( %d )\n", (n)); \
1970 } \ 1973 } \
1971 _align_nr = (n + 0xf) & ~0xf; \ 1974 _align_nr = RADEON_RING_ALIGN - ((dev_priv->ring.tail + n) & (RADEON_RING_ALIGN-1)); \
1975 _align_nr += n; \
1972 if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \ 1976 if (dev_priv->ring.space <= (_align_nr * sizeof(u32))) { \
1973 COMMIT_RING(); \ 1977 COMMIT_RING(); \
1974 radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \ 1978 radeon_wait_ring( dev_priv, _align_nr * sizeof(u32)); \
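
The reworked BEGIN_RING() no longer rounds n itself up to 16 dwords; it reserves n plus exactly the pad that radeon_commit_ring() will emit afterwards to bring the tail back to a RADEON_RING_ALIGN boundary (when tail + n is already aligned the macro simply over-reserves a full 16). A worked example, assuming a ring tail of 10 dwords and a 20-dword request:

    #include <assert.h>

    #define RING_ALIGN 16   /* RADEON_RING_ALIGN from the header above */

    /* Dwords BEGIN_RING() must reserve: the request plus the pad that
     * radeon_commit_ring() will add to realign the tail afterwards. */
    static int reserve_dwords(int tail, int n)
    {
            int pad = RING_ALIGN - ((tail + n) & (RING_ALIGN - 1));
            return n + pad;
    }

    /* Pad emitted by radeon_commit_ring() for a given tail position. */
    static int commit_pad(int tail)
    {
            int tail_aligned = tail & (RING_ALIGN - 1);
            return tail_aligned ? RING_ALIGN - tail_aligned : 0;
    }

    int main(void)
    {
            int tail = 10, n = 20;
            /* reservation covers the 20 requested dwords plus 2 pad dwords */
            assert(reserve_dwords(tail, n) == 22);
            /* after writing n dwords the tail sits at 30; commit pads by 2 */
            assert(commit_pad(tail + n) == 2);
            assert(((tail + n + commit_pad(tail + n)) & (RING_ALIGN - 1)) == 0);
            return 0;
    }
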
diff --git a/drivers/gpu/drm/via/via_dmablit.c b/drivers/gpu/drm/via/via_dmablit.c
index 409e00afdd07..327380888b4a 100644
--- a/drivers/gpu/drm/via/via_dmablit.c
+++ b/drivers/gpu/drm/via/via_dmablit.c
@@ -195,10 +195,8 @@ via_free_sg_info(struct pci_dev *pdev, drm_via_sg_info_t *vsg)
195 default: 195 default:
196 vsg->state = dr_via_sg_init; 196 vsg->state = dr_via_sg_init;
197 } 197 }
198 if (vsg->bounce_buffer) { 198 vfree(vsg->bounce_buffer);
199 vfree(vsg->bounce_buffer); 199 vsg->bounce_buffer = NULL;
200 vsg->bounce_buffer = NULL;
201 }
202 vsg->free_on_sequence = 0; 200 vsg->free_on_sequence = 0;
203} 201}
204 202
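
The via_dmablit change relies on vfree() being a no-op when handed a NULL pointer, so the surrounding if is redundant; the same idiom applies to free() in user space:

    #include <stdlib.h>

    int main(void)
    {
            char *bounce = NULL;

            /* free(), like the kernel's vfree(), is defined to ignore NULL,
             * so no "if (bounce)" guard is needed before releasing it */
            free(bounce);
            bounce = NULL;  /* keep the pointer from dangling, as the driver does */
            return 0;
    }
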