Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--	drivers/gpu/drm/Kconfig	|   2
-rw-r--r--	drivers/gpu/drm/drm_crtc.c	|   8
-rw-r--r--	drivers/gpu/drm/drm_crtc_helper.c	|  40
-rw-r--r--	drivers/gpu/drm/drm_debugfs.c	|  12
-rw-r--r--	drivers/gpu/drm/drm_drv.c	|   2
-rw-r--r--	drivers/gpu/drm/drm_irq.c	|  31
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_buf.c	|  62
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_buf.h	|  21
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_connector.c	|  78
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_crtc.c	|  76
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_crtc.h	|  25
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_drv.c	|   5
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_drv.h	|  13
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_encoder.c	|  83
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_encoder.h	|   1
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_fb.c	|  66
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_fbdev.c	|  44
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_fimd.c	|  71
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_gem.c	|  89
-rw-r--r--	drivers/gpu/drm/exynos/exynos_drm_gem.h	|  28
-rw-r--r--	drivers/gpu/drm/i915/i915_debugfs.c	|  62
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.c	|  10
-rw-r--r--	drivers/gpu/drm/i915/i915_drv.h	|  19
-rw-r--r--	drivers/gpu/drm/i915/i915_gem.c	|  21
-rw-r--r--	drivers/gpu/drm/i915/i915_irq.c	|   1
-rw-r--r--	drivers/gpu/drm/i915/i915_reg.h	|  21
-rw-r--r--	drivers/gpu/drm/i915/i915_suspend.c	|   2
-rw-r--r--	drivers/gpu/drm/i915/intel_display.c	|  33
-rw-r--r--	drivers/gpu/drm/i915/intel_dp.c	| 411
-rw-r--r--	drivers/gpu/drm/i915/intel_panel.c	|   3
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bios.c	|  22
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c	|   2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_channel.c	|   1
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_connector.c	|   2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_display.c	|  45
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_drv.c	|   4
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_drv.h	|   6
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_fbcon.c	|  11
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_fence.c	|   2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_i2c.c	|   2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_object.c	|   2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_perf.c	|   2
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_sgdma.c	|   3
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_state.c	|  13
-rw-r--r--	drivers/gpu/drm/nouveau/nv40_pm.c	|  20
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_display.c	|   4
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_graph.c	|   4
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_grctx.c	|   2
-rw-r--r--	drivers/gpu/drm/nouveau/nv50_vram.c	|   2
-rw-r--r--	drivers/gpu/drm/nouveau/nvc0_graph.c	|  43
-rw-r--r--	drivers/gpu/drm/nouveau/nvc0_grctx.c	|  31
-rw-r--r--	drivers/gpu/drm/nouveau/nvc0_vram.c	|  16
-rw-r--r--	drivers/gpu/drm/nouveau/nvd0_display.c	|   2
-rw-r--r--	drivers/gpu/drm/radeon/atombios_crtc.c	|  41
-rw-r--r--	drivers/gpu/drm/radeon/atombios_dp.c	|   2
-rw-r--r--	drivers/gpu/drm/radeon/evergreen.c	|  60
-rw-r--r--	drivers/gpu/drm/radeon/evergreen_cs.c	| 195
-rw-r--r--	drivers/gpu/drm/radeon/evergreen_reg.h	|  29
-rw-r--r--	drivers/gpu/drm/radeon/evergreend.h	|  31
-rw-r--r--	drivers/gpu/drm/radeon/r100.c	|   7
-rw-r--r--	drivers/gpu/drm/radeon/r300.c	|  94
-rw-r--r--	drivers/gpu/drm/radeon/r600.c	| 118
-rw-r--r--	drivers/gpu/drm/radeon/r600_cs.c	|  26
-rw-r--r--	drivers/gpu/drm/radeon/radeon.h	|  53
-rw-r--r--	drivers/gpu/drm/radeon/radeon_acpi.c	|  11
-rw-r--r--	drivers/gpu/drm/radeon/radeon_asic.c	|   2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_asic.h	|   1
-rw-r--r--	drivers/gpu/drm/radeon/radeon_atombios.c	| 304
-rw-r--r--	drivers/gpu/drm/radeon/radeon_benchmark.c	|   2
-rw-r--r--	drivers/gpu/drm/radeon/radeon_combios.c	|  27
-rw-r--r--	drivers/gpu/drm/radeon/radeon_cs.c	|  25
-rw-r--r--	drivers/gpu/drm/radeon/radeon_device.c	|  16
-rw-r--r--	drivers/gpu/drm/radeon/radeon_drv.c	|   3
-rw-r--r--	drivers/gpu/drm/radeon/radeon_encoders.c	|   7
-rw-r--r--	drivers/gpu/drm/radeon/radeon_legacy_crtc.c	|   6
-rw-r--r--	drivers/gpu/drm/radeon/radeon_pm.c	|  18
-rw-r--r--	drivers/gpu/drm/radeon/rs600.c	|   7
-rw-r--r--	drivers/gpu/drm/radeon/rv770.c	|   7
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo.c	|   8
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c	|   4
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_kms.c	|  25
81 files changed, 1786 insertions(+), 924 deletions(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index 785127cb281b..1368826ef284 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -9,7 +9,6 @@ menuconfig DRM
 	depends on (AGP || AGP=n) && !EMULATED_CMPXCHG && MMU
 	select I2C
 	select I2C_ALGOBIT
-	select SLOW_WORK
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
@@ -96,6 +95,7 @@ config DRM_I915
 	select FB_CFB_IMAGEBLIT
 	# i915 depends on ACPI_VIDEO when ACPI is enabled
 	# but for select to work, need to select ACPI_VIDEO's dependencies, ick
+	select BACKLIGHT_LCD_SUPPORT if ACPI
 	select BACKLIGHT_CLASS_DEVICE if ACPI
 	select VIDEO_OUTPUT_CONTROL if ACPI
 	select INPUT if ACPI
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 9a2e2a14b3bb..8323fc389840 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1873,6 +1873,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 	}

 	if (num_clips && clips_ptr) {
+		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+			ret = -EINVAL;
+			goto out_err1;
+		}
 		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
 		if (!clips) {
 			ret = -ENOMEM;
@@ -2118,8 +2122,10 @@ struct drm_property *drm_property_create(struct drm_device *dev, int flags,
 	property->num_values = num_values;
 	INIT_LIST_HEAD(&property->enum_blob_list);

-	if (name)
+	if (name) {
 		strncpy(property->name, name, DRM_PROP_NAME_LEN);
+		property->name[DRM_PROP_NAME_LEN-1] = '\0';
+	}

 	list_add_tail(&property->head, &dev->mode_config.property_list);
 	return property;
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 2957636161e8..d2619d72cece 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -456,6 +456,30 @@ done:
 EXPORT_SYMBOL(drm_crtc_helper_set_mode);


+static int
+drm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	/* Decouple all encoders and their attached connectors from this crtc */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+			if (connector->encoder != encoder)
+				continue;
+
+			connector->encoder = NULL;
+		}
+	}
+
+	drm_helper_disable_unused_functions(dev);
+	return 0;
+}
+
 /**
  * drm_crtc_helper_set_config - set a new config from userspace
  * @crtc: CRTC to setup
@@ -484,6 +508,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 	struct drm_connector *save_connectors, *connector;
 	int count = 0, ro, fail = 0;
 	struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_mode_set save_set;
 	int ret = 0;
 	int i;

@@ -509,8 +534,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 			(int)set->num_connectors, set->x, set->y);
 	} else {
 		DRM_DEBUG_KMS("[CRTC:%d] [NOFB]\n", set->crtc->base.id);
-		set->mode = NULL;
-		set->num_connectors = 0;
+		return drm_crtc_helper_disable(set->crtc);
 	}

 	dev = set->crtc->dev;
@@ -556,6 +580,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
 		save_connectors[count++] = *connector;
 	}

+	save_set.crtc = set->crtc;
+	save_set.mode = &set->crtc->mode;
+	save_set.x = set->crtc->x;
+	save_set.y = set->crtc->y;
+	save_set.fb = set->crtc->fb;
+
 	/* We should be able to check here if the fb has the same properties
 	 * and then just flip_or_move it */
 	if (set->crtc->fb != set->fb) {
@@ -721,6 +751,12 @@ fail:
 		*connector = save_connectors[count++];
 	}

+	/* Try to restore the config */
+	if (mode_changed &&
+	    !drm_crtc_helper_set_mode(save_set.crtc, save_set.mode, save_set.x,
+				      save_set.y, save_set.fb))
+		DRM_ERROR("failed to restore config after modeset failure\n");
+
 	kfree(save_connectors);
 	kfree(save_encoders);
 	kfree(save_crtcs);
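Note: the set_config rework above follows a snapshot-and-rollback discipline: the CRTC's current mode is captured in save_set before the new configuration is attempted, and if the modeset fails it is replayed so the display is not left half-programmed. A hedged sketch of that shape, with hypothetical types and a stubbed program_hardware():

#include <stdio.h>

struct mode_set { int hdisplay, vdisplay, x, y; };

/* Stand-in for the real modeset entry point; in practice this may fail. */
static int program_hardware(struct mode_set *hw, const struct mode_set *req)
{
	*hw = *req;	/* pretend the hardware accepted the request */
	return 0;
}

static int apply_mode(struct mode_set *hw, const struct mode_set *req)
{
	struct mode_set saved = *hw;	/* snapshot before touching anything */

	if (program_hardware(hw, req) == 0)
		return 0;		/* new config took effect */

	/* Attempt rollback; a failure here mirrors the DRM_ERROR above. */
	if (program_hardware(hw, &saved) != 0)
		fprintf(stderr, "failed to restore config after modeset failure\n");
	return -1;
}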
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index d067c12ba940..1c7a1c0d3edd 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -118,7 +118,10 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
 		tmp->minor = minor;
 		tmp->dent = ent;
 		tmp->info_ent = &files[i];
-		list_add(&(tmp->list), &(minor->debugfs_nodes.list));
+
+		mutex_lock(&minor->debugfs_lock);
+		list_add(&tmp->list, &minor->debugfs_list);
+		mutex_unlock(&minor->debugfs_lock);
 	}
 	return 0;

@@ -146,7 +149,8 @@ int drm_debugfs_init(struct drm_minor *minor, int minor_id,
 	char name[64];
 	int ret;

-	INIT_LIST_HEAD(&minor->debugfs_nodes.list);
+	INIT_LIST_HEAD(&minor->debugfs_list);
+	mutex_init(&minor->debugfs_lock);
 	sprintf(name, "%d", minor_id);
 	minor->debugfs_root = debugfs_create_dir(name, root);
 	if (!minor->debugfs_root) {
@@ -192,8 +196,9 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
 	struct drm_info_node *tmp;
 	int i;

+	mutex_lock(&minor->debugfs_lock);
 	for (i = 0; i < count; i++) {
-		list_for_each_safe(pos, q, &minor->debugfs_nodes.list) {
+		list_for_each_safe(pos, q, &minor->debugfs_list) {
 			tmp = list_entry(pos, struct drm_info_node, list);
 			if (tmp->info_ent == &files[i]) {
 				debugfs_remove(tmp->dent);
@@ -202,6 +207,7 @@ int drm_debugfs_remove_files(struct drm_info_list *files, int count,
 			}
 		}
 	}
+	mutex_unlock(&minor->debugfs_lock);
 	return 0;
 }
 EXPORT_SYMBOL(drm_debugfs_remove_files);
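Note: the drm_debugfs.c hunks convert an unsynchronized node list into one guarded by a dedicated mutex, held around every insertion and around the whole removal walk. A userspace sketch of the same discipline using pthreads (all names here are illustrative):

#include <pthread.h>
#include <stdlib.h>

struct node {
	struct node *next;
	void *payload;
};

static struct node *head;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static int node_add(void *payload)
{
	struct node *n = malloc(sizeof(*n));
	if (!n)
		return -1;
	n->payload = payload;

	pthread_mutex_lock(&list_lock);	/* every mutation holds the lock */
	n->next = head;
	head = n;
	pthread_mutex_unlock(&list_lock);
	return 0;
}

static void node_remove(void *payload)
{
	pthread_mutex_lock(&list_lock);	/* the lock spans the whole walk */
	for (struct node **pp = &head; *pp; pp = &(*pp)->next) {
		if ((*pp)->payload == payload) {
			struct node *victim = *pp;
			*pp = victim->next;
			free(victim);
			break;
		}
	}
	pthread_mutex_unlock(&list_lock);
}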
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index fc81af9dbf42..40c187c60f44 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -125,7 +125,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_SG_ALLOC, drm_sg_alloc_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_SG_FREE, drm_sg_free, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),

-	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, DRM_UNLOCKED),

 	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),

diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index cb3794a00f98..44a5d0ad8b7c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -110,10 +110,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	/* Prevent vblank irq processing while disabling vblank irqs,
 	 * so no updates of timestamps or count can happen after we've
 	 * disabled. Needed to prevent races in case of delayed irq's.
-	 * Disable preemption, so vblank_time_lock is held as short as
-	 * possible, even under a kernel with PREEMPT_RT patches.
 	 */
-	preempt_disable();
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);

 	dev->driver->disable_vblank(dev, crtc);
@@ -164,7 +161,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	clear_vblank_timestamps(dev, crtc);

 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-	preempt_enable();
 }

 static void vblank_disable_fn(unsigned long arg)
@@ -407,13 +403,16 @@ int drm_irq_uninstall(struct drm_device *dev)
 	/*
 	 * Wake up any waiters so they don't hang.
 	 */
-	spin_lock_irqsave(&dev->vbl_lock, irqflags);
-	for (i = 0; i < dev->num_crtcs; i++) {
-		DRM_WAKEUP(&dev->vbl_queue[i]);
-		dev->vblank_enabled[i] = 0;
-		dev->last_vblank[i] = dev->driver->get_vblank_counter(dev, i);
+	if (dev->num_crtcs) {
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		for (i = 0; i < dev->num_crtcs; i++) {
+			DRM_WAKEUP(&dev->vbl_queue[i]);
+			dev->vblank_enabled[i] = 0;
+			dev->last_vblank[i] =
+				dev->driver->get_vblank_counter(dev, i);
+		}
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 	}
-	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);

 	if (!irq_enabled)
 		return -EINVAL;
@@ -886,10 +885,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
 	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
-		/* Disable preemption while holding vblank_time_lock. Do
-		 * it explicitely to guard against PREEMPT_RT kernel.
-		 */
-		preempt_disable();
 		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
 		if (!dev->vblank_enabled[crtc]) {
 			/* Enable vblank irqs under vblank_time_lock protection.
@@ -909,7 +904,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 			}
 		}
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
-		preempt_enable();
 	} else {
 		if (!dev->vblank_enabled[crtc]) {
 			atomic_dec(&dev->vblank_refcount[crtc]);
@@ -1125,6 +1119,7 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
 		trace_drm_vblank_event_delivered(current->pid, pipe,
 						 vblwait->request.sequence);
 	} else {
+		/* drm_handle_vblank_events will call drm_vblank_put */
 		list_add_tail(&e->base.link, &dev->vblank_event_list);
 		vblwait->reply.sequence = vblwait->request.sequence;
 	}
@@ -1205,8 +1200,12 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		goto done;
 	}

-	if (flags & _DRM_VBLANK_EVENT)
+	if (flags & _DRM_VBLANK_EVENT) {
+		/* must hold on to the vblank ref until the event fires
+		 * drm_vblank_put will be called asynchronously
+		 */
 		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+	}

 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
 	    (seq - vblwait->request.sequence) <= (1<<23)) {
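Note: the drm_irq.c changes hinge on holding a reference across an asynchronous boundary: drm_wait_vblank() takes a vblank reference, and when an event was requested the reference is deliberately kept until the event fires and drm_vblank_put() runs from the interrupt path. A simplified sketch of that get/put lifetime with C11 atomics (the irq toggle is deliberately unsynchronized here, unlike the spinlock-protected original):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int vblank_refcount;
static bool irq_on;	/* simplified: no locking around the toggle */

static void vblank_get(void)
{
	if (atomic_fetch_add(&vblank_refcount, 1) == 0)
		irq_on = true;	/* 0 -> 1 enables the interrupt source */
}

static void vblank_put(void)
{
	if (atomic_fetch_sub(&vblank_refcount, 1) == 1)
		irq_on = false;	/* 1 -> 0 allows it to be disabled again */
}

/* Queueing an event keeps the reference alive across the wait. */
static void queue_vblank_event(void)
{
	vblank_get();
	/* ... the event is delivered later, from the interrupt path ... */
}

/* The completion path drops the reference taken above. */
static void vblank_event_fired(void)
{
	vblank_put();
}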
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.c b/drivers/gpu/drm/exynos/exynos_drm_buf.c
index 6f8afea94fc9..2bb07bca511a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.c
@@ -27,82 +27,84 @@
 #include "drm.h"

 #include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"

-static DEFINE_MUTEX(exynos_drm_buf_lock);
-
 static int lowlevel_buffer_allocate(struct drm_device *dev,
-		struct exynos_drm_buf_entry *entry)
+		struct exynos_drm_gem_buf *buffer)
 {
 	DRM_DEBUG_KMS("%s\n", __FILE__);

-	entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
-			(dma_addr_t *)&entry->paddr, GFP_KERNEL);
-	if (!entry->paddr) {
+	buffer->kvaddr = dma_alloc_writecombine(dev->dev, buffer->size,
+			&buffer->dma_addr, GFP_KERNEL);
+	if (!buffer->kvaddr) {
 		DRM_ERROR("failed to allocate buffer.\n");
 		return -ENOMEM;
 	}

-	DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
-			(unsigned int)entry->vaddr, entry->paddr, entry->size);
+	DRM_DEBUG_KMS("vaddr(0x%lx), dma_addr(0x%lx), size(0x%lx)\n",
+			(unsigned long)buffer->kvaddr,
+			(unsigned long)buffer->dma_addr,
+			buffer->size);

 	return 0;
 }

 static void lowlevel_buffer_deallocate(struct drm_device *dev,
-		struct exynos_drm_buf_entry *entry)
+		struct exynos_drm_gem_buf *buffer)
 {
 	DRM_DEBUG_KMS("%s.\n", __FILE__);

-	if (entry->paddr && entry->vaddr && entry->size)
-		dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
-				entry->paddr);
+	if (buffer->dma_addr && buffer->size)
+		dma_free_writecombine(dev->dev, buffer->size, buffer->kvaddr,
+				(dma_addr_t)buffer->dma_addr);
 	else
-		DRM_DEBUG_KMS("entry data is null.\n");
+		DRM_DEBUG_KMS("buffer data are invalid.\n");
 }

-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
 		unsigned int size)
 {
-	struct exynos_drm_buf_entry *entry;
+	struct exynos_drm_gem_buf *buffer;

 	DRM_DEBUG_KMS("%s.\n", __FILE__);
+	DRM_DEBUG_KMS("desired size = 0x%x\n", size);

-	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
-	if (!entry) {
-		DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
+	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
+	if (!buffer) {
+		DRM_ERROR("failed to allocate exynos_drm_gem_buf.\n");
 		return ERR_PTR(-ENOMEM);
 	}

-	entry->size = size;
+	buffer->size = size;

 	/*
 	 * allocate memory region with size and set the memory information
-	 * to vaddr and paddr of a entry object.
+	 * to vaddr and dma_addr of a buffer object.
 	 */
-	if (lowlevel_buffer_allocate(dev, entry) < 0) {
-		kfree(entry);
-		entry = NULL;
+	if (lowlevel_buffer_allocate(dev, buffer) < 0) {
+		kfree(buffer);
+		buffer = NULL;
 		return ERR_PTR(-ENOMEM);
 	}

-	return entry;
+	return buffer;
 }

 void exynos_drm_buf_destroy(struct drm_device *dev,
-		struct exynos_drm_buf_entry *entry)
+		struct exynos_drm_gem_buf *buffer)
 {
 	DRM_DEBUG_KMS("%s.\n", __FILE__);

-	if (!entry) {
-		DRM_DEBUG_KMS("entry is null.\n");
+	if (!buffer) {
+		DRM_DEBUG_KMS("buffer is null.\n");
 		return;
 	}

-	lowlevel_buffer_deallocate(dev, entry);
+	lowlevel_buffer_deallocate(dev, buffer);

-	kfree(entry);
-	entry = NULL;
+	kfree(buffer);
+	buffer = NULL;
 }

 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
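Note: exynos_drm_buf_create() reports failure as ERR_PTR(-ENOMEM) rather than NULL, the kernel idiom of folding a negative errno into an invalid pointer value so callers can recover the reason with PTR_ERR(). A freestanding approximation of that encoding (the real helpers live in include/linux/err.h):

#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

/* Encode a negative errno in the top page of the address space. */
static inline void *err_ptr(long error)
{
	return (void *)(uintptr_t)error;
}

static inline int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

static inline long ptr_err(const void *ptr)
{
	return (long)(uintptr_t)ptr;
}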
diff --git a/drivers/gpu/drm/exynos/exynos_drm_buf.h b/drivers/gpu/drm/exynos/exynos_drm_buf.h
index 045d59eab01a..6e91f9caa5db 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_buf.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_buf.h
@@ -26,28 +26,15 @@
 #ifndef _EXYNOS_DRM_BUF_H_
 #define _EXYNOS_DRM_BUF_H_

-/*
- * exynos drm buffer entry structure.
- *
- * @paddr: physical address of allocated memory.
- * @vaddr: kernel virtual address of allocated memory.
- * @size: size of allocated memory.
- */
-struct exynos_drm_buf_entry {
-	dma_addr_t paddr;
-	void __iomem *vaddr;
-	unsigned int size;
-};
-
 /* allocate physical memory. */
-struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+struct exynos_drm_gem_buf *exynos_drm_buf_create(struct drm_device *dev,
 		unsigned int size);

-/* get physical memory information of a drm framebuffer. */
-struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
+/* get memory information of a drm framebuffer. */
+struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);

 /* remove allocated physical memory. */
 void exynos_drm_buf_destroy(struct drm_device *dev,
-		struct exynos_drm_buf_entry *entry);
+		struct exynos_drm_gem_buf *buffer);

 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_connector.c b/drivers/gpu/drm/exynos/exynos_drm_connector.c
index 985d9e768728..d620b0784257 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_connector.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_connector.c
@@ -37,6 +37,8 @@

 struct exynos_drm_connector {
 	struct drm_connector	drm_connector;
+	uint32_t		encoder_id;
+	struct exynos_drm_manager *manager;
 };

 /* convert exynos_video_timings to drm_display_mode */
@@ -47,6 +49,7 @@ convert_to_display_mode(struct drm_display_mode *mode,
 	DRM_DEBUG_KMS("%s\n", __FILE__);

 	mode->clock = timing->pixclock / 1000;
+	mode->vrefresh = timing->refresh;

 	mode->hdisplay = timing->xres;
 	mode->hsync_start = mode->hdisplay + timing->left_margin;
@@ -57,6 +60,12 @@ convert_to_display_mode(struct drm_display_mode *mode,
 	mode->vsync_start = mode->vdisplay + timing->upper_margin;
 	mode->vsync_end = mode->vsync_start + timing->vsync_len;
 	mode->vtotal = mode->vsync_end + timing->lower_margin;
+
+	if (timing->vmode & FB_VMODE_INTERLACED)
+		mode->flags |= DRM_MODE_FLAG_INTERLACE;
+
+	if (timing->vmode & FB_VMODE_DOUBLE)
+		mode->flags |= DRM_MODE_FLAG_DBLSCAN;
 }

 /* convert drm_display_mode to exynos_video_timings */
@@ -69,7 +78,7 @@ convert_to_video_timing(struct fb_videomode *timing,
 	memset(timing, 0, sizeof(*timing));

 	timing->pixclock = mode->clock * 1000;
-	timing->refresh = mode->vrefresh;
+	timing->refresh = drm_mode_vrefresh(mode);

 	timing->xres = mode->hdisplay;
 	timing->left_margin = mode->hsync_start - mode->hdisplay;
@@ -92,15 +101,16 @@ convert_to_video_timing(struct fb_videomode *timing,

 static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 {
-	struct exynos_drm_manager *manager =
-		exynos_drm_get_manager(connector->encoder);
-	struct exynos_drm_display *display = manager->display;
+	struct exynos_drm_connector *exynos_connector =
+		to_exynos_connector(connector);
+	struct exynos_drm_manager *manager = exynos_connector->manager;
+	struct exynos_drm_display_ops *display_ops = manager->display_ops;
 	unsigned int count;

 	DRM_DEBUG_KMS("%s\n", __FILE__);

-	if (!display) {
-		DRM_DEBUG_KMS("display is null.\n");
+	if (!display_ops) {
+		DRM_DEBUG_KMS("display_ops is null.\n");
 		return 0;
 	}

@@ -112,7 +122,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 	 * P.S. in case of lcd panel, count is always 1 if success
 	 * because lcd panel has only one mode.
 	 */
-	if (display->get_edid) {
+	if (display_ops->get_edid) {
 		int ret;
 		void *edid;

@@ -122,7 +132,7 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 			return 0;
 		}

-		ret = display->get_edid(manager->dev, connector,
+		ret = display_ops->get_edid(manager->dev, connector,
 				edid, MAX_EDID);
 		if (ret < 0) {
 			DRM_ERROR("failed to get edid data.\n");
@@ -140,8 +150,8 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 		struct drm_display_mode *mode = drm_mode_create(connector->dev);
 		struct fb_videomode *timing;

-		if (display->get_timing)
-			timing = display->get_timing(manager->dev);
+		if (display_ops->get_timing)
+			timing = display_ops->get_timing(manager->dev);
 		else {
 			drm_mode_destroy(connector->dev, mode);
 			return 0;
@@ -162,9 +172,10 @@ static int exynos_drm_connector_get_modes(struct drm_connector *connector)
 static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
 					    struct drm_display_mode *mode)
 {
-	struct exynos_drm_manager *manager =
-		exynos_drm_get_manager(connector->encoder);
-	struct exynos_drm_display *display = manager->display;
+	struct exynos_drm_connector *exynos_connector =
+		to_exynos_connector(connector);
+	struct exynos_drm_manager *manager = exynos_connector->manager;
+	struct exynos_drm_display_ops *display_ops = manager->display_ops;
 	struct fb_videomode timing;
 	int ret = MODE_BAD;

@@ -172,8 +183,8 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,

 	convert_to_video_timing(&timing, mode);

-	if (display && display->check_timing)
-		if (!display->check_timing(manager->dev, (void *)&timing))
+	if (display_ops && display_ops->check_timing)
+		if (!display_ops->check_timing(manager->dev, (void *)&timing))
 			ret = MODE_OK;

 	return ret;
@@ -181,9 +192,25 @@ static int exynos_drm_connector_mode_valid(struct drm_connector *connector,

 struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
 {
+	struct drm_device *dev = connector->dev;
+	struct exynos_drm_connector *exynos_connector =
+		to_exynos_connector(connector);
+	struct drm_mode_object *obj;
+	struct drm_encoder *encoder;
+
 	DRM_DEBUG_KMS("%s\n", __FILE__);

-	return connector->encoder;
+	obj = drm_mode_object_find(dev, exynos_connector->encoder_id,
+				   DRM_MODE_OBJECT_ENCODER);
+	if (!obj) {
+		DRM_DEBUG_KMS("Unknown ENCODER ID %d\n",
+				exynos_connector->encoder_id);
+		return NULL;
+	}
+
+	encoder = obj_to_encoder(obj);
+
+	return encoder;
 }

 static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
@@ -196,15 +223,17 @@ static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
 static enum drm_connector_status
 exynos_drm_connector_detect(struct drm_connector *connector, bool force)
 {
-	struct exynos_drm_manager *manager =
-		exynos_drm_get_manager(connector->encoder);
-	struct exynos_drm_display *display = manager->display;
+	struct exynos_drm_connector *exynos_connector =
+		to_exynos_connector(connector);
+	struct exynos_drm_manager *manager = exynos_connector->manager;
+	struct exynos_drm_display_ops *display_ops =
+					manager->display_ops;
 	enum drm_connector_status status = connector_status_disconnected;

 	DRM_DEBUG_KMS("%s\n", __FILE__);

-	if (display && display->is_connected) {
-		if (display->is_connected(manager->dev))
+	if (display_ops && display_ops->is_connected) {
+		if (display_ops->is_connected(manager->dev))
 			status = connector_status_connected;
 		else
 			status = connector_status_disconnected;
@@ -251,9 +280,11 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,

 	connector = &exynos_connector->drm_connector;

-	switch (manager->display->type) {
+	switch (manager->display_ops->type) {
 	case EXYNOS_DISPLAY_TYPE_HDMI:
 		type = DRM_MODE_CONNECTOR_HDMIA;
+		connector->interlace_allowed = true;
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
 		break;
 	default:
 		type = DRM_MODE_CONNECTOR_Unknown;
@@ -267,7 +298,10 @@ struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
 	if (err)
 		goto err_connector;

+	exynos_connector->encoder_id = encoder->base.id;
+	exynos_connector->manager = manager;
 	connector->encoder = encoder;
+
 	err = drm_mode_connector_attach_encoder(connector, encoder);
 	if (err) {
 		DRM_ERROR("failed to attach a connector to a encoder\n");
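Note: instead of trusting connector->encoder, which may be NULL before the first modeset, the connector now records encoder_id and resolves it through drm_mode_object_find() at lookup time. A toy sketch of the resolve-by-id pattern (the registry here is hypothetical, not DRM's object table):

#include <stddef.h>
#include <stdint.h>

struct encoder { uint32_t id; /* ... hardware state ... */ };

#define MAX_ENCODERS 8
static struct encoder *registry[MAX_ENCODERS];

/* Resolve the id at use time; a stale id yields NULL, not a dangling pointer. */
static struct encoder *find_encoder(uint32_t id)
{
	for (size_t i = 0; i < MAX_ENCODERS; i++)
		if (registry[i] && registry[i]->id == id)
			return registry[i];
	return NULL;
}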
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 9337e5e2dbb6..ee43cc220853 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -29,36 +29,17 @@
 #include "drmP.h"
 #include "drm_crtc_helper.h"

+#include "exynos_drm_crtc.h"
 #include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_encoder.h"
+#include "exynos_drm_gem.h"
 #include "exynos_drm_buf.h"

 #define to_exynos_crtc(x)	container_of(x, struct exynos_drm_crtc,\
 				drm_crtc)

 /*
- * Exynos specific crtc postion structure.
- *
- * @fb_x: offset x on a framebuffer to be displyed
- *	- the unit is screen coordinates.
- * @fb_y: offset y on a framebuffer to be displayed
- *	- the unit is screen coordinates.
- * @crtc_x: offset x on hardware screen.
- * @crtc_y: offset y on hardware screen.
- * @crtc_w: width of hardware screen.
- * @crtc_h: height of hardware screen.
- */
-struct exynos_drm_crtc_pos {
-	unsigned int fb_x;
-	unsigned int fb_y;
-	unsigned int crtc_x;
-	unsigned int crtc_y;
-	unsigned int crtc_w;
-	unsigned int crtc_h;
-};
-
-/*
  * Exynos specific crtc structure.
  *
  * @drm_crtc: crtc object.
@@ -85,30 +66,31 @@ static void exynos_drm_crtc_apply(struct drm_crtc *crtc)

 	exynos_drm_fn_encoder(crtc, overlay,
 			exynos_drm_encoder_crtc_mode_set);
-	exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit);
+	exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+			exynos_drm_encoder_crtc_commit);
 }

-static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
 			    struct drm_framebuffer *fb,
 			    struct drm_display_mode *mode,
 			    struct exynos_drm_crtc_pos *pos)
 {
-	struct exynos_drm_buf_entry *entry;
+	struct exynos_drm_gem_buf *buffer;
 	unsigned int actual_w;
 	unsigned int actual_h;

-	entry = exynos_drm_fb_get_buf(fb);
-	if (!entry) {
-		DRM_LOG_KMS("entry is null.\n");
+	buffer = exynos_drm_fb_get_buf(fb);
+	if (!buffer) {
+		DRM_LOG_KMS("buffer is null.\n");
 		return -EFAULT;
 	}

-	overlay->paddr = entry->paddr;
-	overlay->vaddr = entry->vaddr;
+	overlay->dma_addr = buffer->dma_addr;
+	overlay->vaddr = buffer->kvaddr;

-	DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
+	DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
 			(unsigned long)overlay->vaddr,
-			(unsigned long)overlay->paddr);
+			(unsigned long)overlay->dma_addr);

 	actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
 	actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
@@ -171,9 +153,26 @@ static int exynos_drm_crtc_update(struct drm_crtc *crtc)

 static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
-	DRM_DEBUG_KMS("%s\n", __FILE__);
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);

-	/* TODO */
+	DRM_DEBUG_KMS("crtc[%d] mode[%d]\n", crtc->base.id, mode);
+
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+				exynos_drm_encoder_crtc_commit);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		/* TODO */
+		exynos_drm_fn_encoder(crtc, NULL,
+				exynos_drm_encoder_crtc_disable);
+		break;
+	default:
+		DRM_DEBUG_KMS("unspecified mode %d\n", mode);
+		break;
+	}
 }

 static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
@@ -185,9 +184,12 @@ static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)

 static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
 {
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
 	DRM_DEBUG_KMS("%s\n", __FILE__);

-	/* drm framework doesn't check NULL. */
+	exynos_drm_fn_encoder(crtc, &exynos_crtc->pipe,
+			exynos_drm_encoder_crtc_commit);
 }

 static bool
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.h b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
index c584042d6d2c..25f72a62cb88 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.h
@@ -35,4 +35,29 @@ int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);

+/*
+ * Exynos specific crtc postion structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displyed
+ *	- the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed
+ *	- the unit is screen coordinates.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_w: width of hardware screen.
+ * @crtc_h: height of hardware screen.
+ */
+struct exynos_drm_crtc_pos {
+	unsigned int fb_x;
+	unsigned int fb_y;
+	unsigned int crtc_x;
+	unsigned int crtc_y;
+	unsigned int crtc_w;
+	unsigned int crtc_h;
+};
+
+int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+			      struct drm_framebuffer *fb,
+			      struct drm_display_mode *mode,
+			      struct exynos_drm_crtc_pos *pos);
 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 83810cbe3c17..53e2216de61d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -27,6 +27,7 @@

 #include "drmP.h"
 #include "drm.h"
+#include "drm_crtc_helper.h"

 #include <drm/exynos_drm.h>

@@ -61,6 +62,9 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)

 	drm_mode_config_init(dev);

+	/* init kms poll for handling hpd */
+	drm_kms_helper_poll_init(dev);
+
 	exynos_drm_mode_config_init(dev);

 	/*
@@ -116,6 +120,7 @@ static int exynos_drm_unload(struct drm_device *dev)
 	exynos_drm_fbdev_fini(dev);
 	exynos_drm_device_unregister(dev);
 	drm_vblank_cleanup(dev);
+	drm_kms_helper_poll_fini(dev);
 	drm_mode_config_cleanup(dev);
 	kfree(dev->dev_private);

diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index c03683f2ae72..5e02e6ecc2e0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -29,6 +29,7 @@
 #ifndef _EXYNOS_DRM_DRV_H_
 #define _EXYNOS_DRM_DRV_H_

+#include <linux/module.h>
 #include "drm.h"

 #define MAX_CRTC	2
@@ -79,8 +80,8 @@ struct exynos_drm_overlay_ops {
  * @scan_flag: interlace or progressive way.
  *	(it could be DRM_MODE_FLAG_*)
  * @bpp: pixel size.(in bit)
- * @paddr: bus(accessed by dma) physical memory address to this overlay
- *	and this is physically continuous.
+ * @dma_addr: bus(accessed by dma) address to the memory region allocated
+ *	for a overlay.
  * @vaddr: virtual memory addresss to this overlay.
  * @default_win: a window to be enabled.
  * @color_key: color key on or off.
@@ -108,7 +109,7 @@ struct exynos_drm_overlay {
 	unsigned int scan_flag;
 	unsigned int bpp;
 	unsigned int pitch;
-	dma_addr_t paddr;
+	dma_addr_t dma_addr;
 	void __iomem *vaddr;

 	bool default_win;
@@ -130,7 +131,7 @@ struct exynos_drm_overlay {
  * @check_timing: check if timing is valid or not.
  * @power_on: display device on or off.
  */
-struct exynos_drm_display {
+struct exynos_drm_display_ops {
 	enum exynos_drm_output_type type;
 	bool (*is_connected)(struct device *dev);
 	int (*get_edid)(struct device *dev, struct drm_connector *connector,
@@ -146,12 +147,14 @@ struct exynos_drm_display {
  * @mode_set: convert drm_display_mode to hw specific display mode and
  *	would be called by encoder->mode_set().
  * @commit: set current hw specific display mode to hw.
+ * @disable: disable hardware specific display mode.
  * @enable_vblank: specific driver callback for enabling vblank interrupt.
  * @disable_vblank: specific driver callback for disabling vblank interrupt.
  */
 struct exynos_drm_manager_ops {
 	void (*mode_set)(struct device *subdrv_dev, void *mode);
 	void (*commit)(struct device *subdrv_dev);
+	void (*disable)(struct device *subdrv_dev);
 	int (*enable_vblank)(struct device *subdrv_dev);
 	void (*disable_vblank)(struct device *subdrv_dev);
 };
@@ -178,7 +181,7 @@ struct exynos_drm_manager {
 	int pipe;
 	struct exynos_drm_manager_ops *ops;
 	struct exynos_drm_overlay_ops *overlay_ops;
-	struct exynos_drm_display *display;
+	struct exynos_drm_display_ops *display_ops;
 };

 /*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 7cf6fa86a67e..153061415baf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -53,15 +53,36 @@ static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
 	struct drm_device *dev = encoder->dev;
 	struct drm_connector *connector;
 	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;

 	DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);

+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+		if (manager_ops && manager_ops->commit)
+			manager_ops->commit(manager->dev);
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		/* TODO */
+		if (manager_ops && manager_ops->disable)
+			manager_ops->disable(manager->dev);
+		break;
+	default:
+		DRM_ERROR("unspecified mode %d\n", mode);
+		break;
+	}
+
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		if (connector->encoder == encoder) {
-			struct exynos_drm_display *display = manager->display;
+			struct exynos_drm_display_ops *display_ops =
+							manager->display_ops;

-			if (display && display->power_on)
-				display->power_on(manager->dev, mode);
+			DRM_DEBUG_KMS("connector[%d] dpms[%d]\n",
+					connector->base.id, mode);
+			if (display_ops && display_ops->power_on)
+				display_ops->power_on(manager->dev, mode);
 		}
 	}
 }
@@ -116,15 +137,11 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
 {
 	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
 	struct exynos_drm_manager_ops *manager_ops = manager->ops;
-	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;

 	DRM_DEBUG_KMS("%s\n", __FILE__);

 	if (manager_ops && manager_ops->commit)
 		manager_ops->commit(manager->dev);
-
-	if (overlay_ops && overlay_ops->commit)
-		overlay_ops->commit(manager->dev);
 }

 static struct drm_crtc *
@@ -208,10 +225,23 @@ void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
 {
 	struct drm_device *dev = crtc->dev;
 	struct drm_encoder *encoder;
+	struct exynos_drm_private *private = dev->dev_private;
+	struct exynos_drm_manager *manager;

 	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
-		if (encoder->crtc != crtc)
-			continue;
+		/*
+		 * if crtc is detached from encoder, check pipe,
+		 * otherwise check crtc attached to encoder
+		 */
+		if (!encoder->crtc) {
+			manager = to_exynos_encoder(encoder)->manager;
+			if (manager->pipe < 0 ||
+					private->crtc[manager->pipe] != crtc)
+				continue;
+		} else {
+			if (encoder->crtc != crtc)
+				continue;
+		}

 		fn(encoder, data);
 	}
@@ -250,8 +280,18 @@ void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
 	struct exynos_drm_manager *manager =
 		to_exynos_encoder(encoder)->manager;
 	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+	int crtc = *(int *)data;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * when crtc is detached from encoder, this pipe is used
+	 * to select manager operation
+	 */
+	manager->pipe = crtc;

-	overlay_ops->commit(manager->dev);
+	if (overlay_ops && overlay_ops->commit)
+		overlay_ops->commit(manager->dev);
 }

 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
@@ -261,7 +301,28 @@ void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
 	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
 	struct exynos_drm_overlay *overlay = data;

-	overlay_ops->mode_set(manager->dev, overlay);
+	if (overlay_ops && overlay_ops->mode_set)
+		overlay_ops->mode_set(manager->dev, overlay);
+}
+
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+
+	DRM_DEBUG_KMS("\n");
+
+	if (overlay_ops && overlay_ops->disable)
+		overlay_ops->disable(manager->dev);
+
+	/*
+	 * crtc is already detached from encoder and last
+	 * function for detaching is properly done, so
+	 * clear pipe from manager to prevent repeated call
+	 */
+	if (!encoder->crtc)
+		manager->pipe = -1;
 }

 MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.h b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
index 5ecd645d06a9..a22acfbf0e4e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.h
@@ -41,5 +41,6 @@ void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
 void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
 void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_disable(struct drm_encoder *encoder, void *data);

 #endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 48d29cfd5240..5bf4a1ac7f82 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -29,7 +29,9 @@
 #include "drmP.h"
 #include "drm_crtc.h"
 #include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"

+#include "exynos_drm_drv.h"
 #include "exynos_drm_fb.h"
 #include "exynos_drm_buf.h"
 #include "exynos_drm_gem.h"
@@ -41,14 +43,14 @@
  *
  * @fb: drm framebuffer obejct.
  * @exynos_gem_obj: exynos specific gem object containing a gem object.
- * @entry: pointer to exynos drm buffer entry object.
- *	- containing only the information to physically continuous memory
- *	region allocated at default framebuffer creation.
+ * @buffer: pointer to exynos_drm_gem_buffer object.
+ *	- contain the memory information to memory region allocated
+ *	at default framebuffer creation.
  */
 struct exynos_drm_fb {
 	struct drm_framebuffer		fb;
 	struct exynos_drm_gem_obj	*exynos_gem_obj;
-	struct exynos_drm_buf_entry	*entry;
+	struct exynos_drm_gem_buf	*buffer;
 };

 static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
@@ -63,8 +65,8 @@ static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
 	 * default framebuffer has no gem object so
 	 * a buffer of the default framebuffer should be released at here.
 	 */
-	if (!exynos_fb->exynos_gem_obj && exynos_fb->entry)
-		exynos_drm_buf_destroy(fb->dev, exynos_fb->entry);
+	if (!exynos_fb->exynos_gem_obj && exynos_fb->buffer)
+		exynos_drm_buf_destroy(fb->dev, exynos_fb->buffer);

 	kfree(exynos_fb);
 	exynos_fb = NULL;
@@ -143,29 +145,29 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
 	 */
 	if (!mode_cmd->handle) {
 		if (!file_priv) {
-			struct exynos_drm_buf_entry *entry;
+			struct exynos_drm_gem_buf *buffer;

 			/*
 			 * in case that file_priv is NULL, it allocates
 			 * only buffer and this buffer would be used
 			 * for default framebuffer.
 			 */
-			entry = exynos_drm_buf_create(dev, size);
-			if (IS_ERR(entry)) {
-				ret = PTR_ERR(entry);
+			buffer = exynos_drm_buf_create(dev, size);
+			if (IS_ERR(buffer)) {
+				ret = PTR_ERR(buffer);
 				goto err_buffer;
 			}

-			exynos_fb->entry = entry;
+			exynos_fb->buffer = buffer;

-			DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n",
-					(unsigned long)entry->paddr, size);
+			DRM_LOG_KMS("default: dma_addr = 0x%lx, size = 0x%x\n",
+					(unsigned long)buffer->dma_addr, size);

 			goto out;
 		} else {
-			exynos_gem_obj = exynos_drm_gem_create(file_priv, dev,
-					size,
-					&mode_cmd->handle);
+			exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
+					&mode_cmd->handle,
+					size);
 			if (IS_ERR(exynos_gem_obj)) {
 				ret = PTR_ERR(exynos_gem_obj);
 				goto err_buffer;
@@ -189,10 +191,10 @@ exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
 	 * so that default framebuffer has no its own gem object,
 	 * only its own buffer object.
 	 */
-	exynos_fb->entry = exynos_gem_obj->entry;
+	exynos_fb->buffer = exynos_gem_obj->buffer;

-	DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
-			(unsigned long)exynos_fb->entry->paddr, size,
+	DRM_LOG_KMS("dma_addr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
+			(unsigned long)exynos_fb->buffer->dma_addr, size,
 			(unsigned int)&exynos_gem_obj->base);

 out:
@@ -220,26 +222,36 @@ struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
220 return exynos_drm_fb_init(file_priv, dev, mode_cmd); 222 return exynos_drm_fb_init(file_priv, dev, mode_cmd);
221} 223}
222 224
223struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb) 225struct exynos_drm_gem_buf *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
224{ 226{
225 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 227 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
226 struct exynos_drm_buf_entry *entry; 228 struct exynos_drm_gem_buf *buffer;
227 229
228 DRM_DEBUG_KMS("%s\n", __FILE__); 230 DRM_DEBUG_KMS("%s\n", __FILE__);
229 231
230 entry = exynos_fb->entry; 232 buffer = exynos_fb->buffer;
231 if (!entry) 233 if (!buffer)
232 return NULL; 234 return NULL;
233 235
234 DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n", 236 DRM_DEBUG_KMS("vaddr = 0x%lx, dma_addr = 0x%lx\n",
235 (unsigned long)entry->vaddr, 237 (unsigned long)buffer->kvaddr,
236 (unsigned long)entry->paddr); 238 (unsigned long)buffer->dma_addr);
237 239
238 return entry; 240 return buffer;
241}
242
243static void exynos_drm_output_poll_changed(struct drm_device *dev)
244{
245 struct exynos_drm_private *private = dev->dev_private;
246 struct drm_fb_helper *fb_helper = private->fb_helper;
247
248 if (fb_helper)
249 drm_fb_helper_hotplug_event(fb_helper);
239} 250}
240 251
241static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = { 252static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
242 .fb_create = exynos_drm_fb_create, 253 .fb_create = exynos_drm_fb_create,
254 .output_poll_changed = exynos_drm_output_poll_changed,
243}; 255};
244 256
245void exynos_drm_mode_config_init(struct drm_device *dev) 257void exynos_drm_mode_config_init(struct drm_device *dev)
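Aside: the fb paths above signal failure through the kernel's ERR_PTR convention from <linux/err.h> instead of returning NULL, which is why every caller tests with IS_ERR(). A minimal standalone sketch of the idiom (alloc_or_err is a hypothetical helper, not part of this patch):

	#include <linux/err.h>
	#include <linux/slab.h>

	static void *alloc_or_err(size_t size)
	{
		void *p = kzalloc(size, GFP_KERNEL);

		if (!p)
			return ERR_PTR(-ENOMEM);	/* encode errno in the pointer */
		return p;
	}

	/* caller side, as exynos_drm_fb_init does with exynos_drm_buf_create() */
	void *p;

	p = alloc_or_err(len);
	if (IS_ERR(p))
		return PTR_ERR(p);	/* decode the errno back out */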
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 1f4b3d1a7713..836f41008187 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -33,6 +33,7 @@
33 33
34#include "exynos_drm_drv.h" 34#include "exynos_drm_drv.h"
35#include "exynos_drm_fb.h" 35#include "exynos_drm_fb.h"
36#include "exynos_drm_gem.h"
36#include "exynos_drm_buf.h" 37#include "exynos_drm_buf.h"
37 38
38#define MAX_CONNECTOR 4 39#define MAX_CONNECTOR 4
@@ -85,15 +86,13 @@ static struct fb_ops exynos_drm_fb_ops = {
85}; 86};
86 87
87static int exynos_drm_fbdev_update(struct drm_fb_helper *helper, 88static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
88 struct drm_framebuffer *fb, 89 struct drm_framebuffer *fb)
89 unsigned int fb_width,
90 unsigned int fb_height)
91{ 90{
92 struct fb_info *fbi = helper->fbdev; 91 struct fb_info *fbi = helper->fbdev;
93 struct drm_device *dev = helper->dev; 92 struct drm_device *dev = helper->dev;
94 struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper); 93 struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
95 struct exynos_drm_buf_entry *entry; 94 struct exynos_drm_gem_buf *buffer;
96 unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3); 95 unsigned int size = fb->width * fb->height * (fb->bits_per_pixel >> 3);
97 unsigned long offset; 96 unsigned long offset;
98 97
99 DRM_DEBUG_KMS("%s\n", __FILE__); 98 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -101,20 +100,20 @@ static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
101 exynos_fb->fb = fb; 100 exynos_fb->fb = fb;
102 101
103 drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth); 102 drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
104 drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height); 103 drm_fb_helper_fill_var(fbi, helper, fb->width, fb->height);
105 104
106 entry = exynos_drm_fb_get_buf(fb); 105 buffer = exynos_drm_fb_get_buf(fb);
107 if (!entry) { 106 if (!buffer) {
108 DRM_LOG_KMS("entry is null.\n"); 107 DRM_LOG_KMS("buffer is null.\n");
109 return -EFAULT; 108 return -EFAULT;
110 } 109 }
111 110
112 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3); 111 offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
113 offset += fbi->var.yoffset * fb->pitch; 112 offset += fbi->var.yoffset * fb->pitch;
114 113
115 dev->mode_config.fb_base = entry->paddr; 114 dev->mode_config.fb_base = (resource_size_t)buffer->dma_addr;
116 fbi->screen_base = entry->vaddr + offset; 115 fbi->screen_base = buffer->kvaddr + offset;
117 fbi->fix.smem_start = entry->paddr + offset; 116 fbi->fix.smem_start = (unsigned long)(buffer->dma_addr + offset);
118 fbi->screen_size = size; 117 fbi->screen_size = size;
119 fbi->fix.smem_len = size; 118 fbi->fix.smem_len = size;
120 119
@@ -171,8 +170,7 @@ static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
171 goto out; 170 goto out;
172 } 171 }
173 172
174 ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, 173 ret = exynos_drm_fbdev_update(helper, helper->fb);
175 sizes->fb_height);
176 if (ret < 0) 174 if (ret < 0)
177 fb_dealloc_cmap(&fbi->cmap); 175 fb_dealloc_cmap(&fbi->cmap);
178 176
@@ -235,8 +233,7 @@ static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
235 } 233 }
236 234
237 helper->fb = exynos_fbdev->fb; 235 helper->fb = exynos_fbdev->fb;
238 return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width, 236 return exynos_drm_fbdev_update(helper, helper->fb);
239 sizes->fb_height);
240} 237}
241 238
242static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper, 239static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
@@ -405,6 +402,18 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
405 fb_helper = private->fb_helper; 402 fb_helper = private->fb_helper;
406 403
407 if (fb_helper) { 404 if (fb_helper) {
405 struct list_head temp_list;
406
407 INIT_LIST_HEAD(&temp_list);
408
409 /*
410			 * fb_helper is reinitialized but the kernel fb is reused,
411			 * so kernel_fb_list needs to be backed up and restored
412 */
413 if (!list_empty(&fb_helper->kernel_fb_list))
414 list_replace_init(&fb_helper->kernel_fb_list,
415 &temp_list);
416
408 drm_fb_helper_fini(fb_helper); 417 drm_fb_helper_fini(fb_helper);
409 418
410 ret = drm_fb_helper_init(dev, fb_helper, 419 ret = drm_fb_helper_init(dev, fb_helper,
@@ -414,6 +423,9 @@ int exynos_drm_fbdev_reinit(struct drm_device *dev)
414 return ret; 423 return ret;
415 } 424 }
416 425
426 if (!list_empty(&temp_list))
427 list_replace(&temp_list, &fb_helper->kernel_fb_list);
428
417 ret = drm_fb_helper_single_add_all_connectors(fb_helper); 429 ret = drm_fb_helper_single_add_all_connectors(fb_helper);
418 if (ret < 0) { 430 if (ret < 0) {
419 DRM_ERROR("failed to add fb helper to connectors\n"); 431 DRM_ERROR("failed to add fb helper to connectors\n");
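Aside: the backup/restore above relies on list_replace_init() and list_replace() from <linux/list.h>. A standalone sketch of the pattern, with src standing in for fb_helper->kernel_fb_list:

	#include <linux/list.h>

	LIST_HEAD(temp_list);

	/* move every entry over to temp_list and leave src empty */
	if (!list_empty(&src))
		list_replace_init(&src, &temp_list);

	/* ... src is torn down and re-initialized here ... */

	/* splice the saved entries back onto the fresh list head */
	if (!list_empty(&temp_list))
		list_replace(&temp_list, &src);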
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 4659c88cdd9b..db3b3d9e731d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -64,7 +64,7 @@ struct fimd_win_data {
64 unsigned int fb_width; 64 unsigned int fb_width;
65 unsigned int fb_height; 65 unsigned int fb_height;
66 unsigned int bpp; 66 unsigned int bpp;
67 dma_addr_t paddr; 67 dma_addr_t dma_addr;
68 void __iomem *vaddr; 68 void __iomem *vaddr;
69 unsigned int buf_offsize; 69 unsigned int buf_offsize;
70 unsigned int line_size; /* bytes */ 70 unsigned int line_size; /* bytes */
@@ -124,7 +124,7 @@ static int fimd_display_power_on(struct device *dev, int mode)
124 return 0; 124 return 0;
125} 125}
126 126
127static struct exynos_drm_display fimd_display = { 127static struct exynos_drm_display_ops fimd_display_ops = {
128 .type = EXYNOS_DISPLAY_TYPE_LCD, 128 .type = EXYNOS_DISPLAY_TYPE_LCD,
129 .is_connected = fimd_display_is_connected, 129 .is_connected = fimd_display_is_connected,
130 .get_timing = fimd_get_timing, 130 .get_timing = fimd_get_timing,
@@ -177,6 +177,40 @@ static void fimd_commit(struct device *dev)
177 writel(val, ctx->regs + VIDCON0); 177 writel(val, ctx->regs + VIDCON0);
178} 178}
179 179
180static void fimd_disable(struct device *dev)
181{
182 struct fimd_context *ctx = get_fimd_context(dev);
183 struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
184 struct drm_device *drm_dev = subdrv->drm_dev;
185 struct exynos_drm_manager *manager = &subdrv->manager;
186 u32 val;
187
188 DRM_DEBUG_KMS("%s\n", __FILE__);
189
190 /* fimd dma off */
191 val = readl(ctx->regs + VIDCON0);
192 val &= ~(VIDCON0_ENVID | VIDCON0_ENVID_F);
193 writel(val, ctx->regs + VIDCON0);
194
195 /*
196	 * if vblank is still enabled while dma is being
197	 * turned off, disable the vsync interrupt.
198 */
199 if (drm_dev->vblank_enabled[manager->pipe] &&
200 atomic_read(&drm_dev->vblank_refcount[manager->pipe])) {
201 drm_vblank_put(drm_dev, manager->pipe);
202
203 /*
204		 * if vblank_disable_allowed is 0, disable the
205		 * vsync interrupt right now; otherwise the drm timer
206		 * will disable it once the current process gives up
207		 * ownership of the vblank event.
208 */
209 if (!drm_dev->vblank_disable_allowed)
210 drm_vblank_off(drm_dev, manager->pipe);
211 }
212}
213
180static int fimd_enable_vblank(struct device *dev) 214static int fimd_enable_vblank(struct device *dev)
181{ 215{
182 struct fimd_context *ctx = get_fimd_context(dev); 216 struct fimd_context *ctx = get_fimd_context(dev);
@@ -220,6 +254,7 @@ static void fimd_disable_vblank(struct device *dev)
220 254
221static struct exynos_drm_manager_ops fimd_manager_ops = { 255static struct exynos_drm_manager_ops fimd_manager_ops = {
222 .commit = fimd_commit, 256 .commit = fimd_commit,
257 .disable = fimd_disable,
223 .enable_vblank = fimd_enable_vblank, 258 .enable_vblank = fimd_enable_vblank,
224 .disable_vblank = fimd_disable_vblank, 259 .disable_vblank = fimd_disable_vblank,
225}; 260};
@@ -251,7 +286,7 @@ static void fimd_win_mode_set(struct device *dev,
251 win_data->ovl_height = overlay->crtc_height; 286 win_data->ovl_height = overlay->crtc_height;
252 win_data->fb_width = overlay->fb_width; 287 win_data->fb_width = overlay->fb_width;
253 win_data->fb_height = overlay->fb_height; 288 win_data->fb_height = overlay->fb_height;
254 win_data->paddr = overlay->paddr + offset; 289 win_data->dma_addr = overlay->dma_addr + offset;
255 win_data->vaddr = overlay->vaddr + offset; 290 win_data->vaddr = overlay->vaddr + offset;
256 win_data->bpp = overlay->bpp; 291 win_data->bpp = overlay->bpp;
257 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) * 292 win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
@@ -263,7 +298,7 @@ static void fimd_win_mode_set(struct device *dev,
263 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 298 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
264 win_data->ovl_width, win_data->ovl_height); 299 win_data->ovl_width, win_data->ovl_height);
265 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n", 300 DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
266 (unsigned long)win_data->paddr, 301 (unsigned long)win_data->dma_addr,
267 (unsigned long)win_data->vaddr); 302 (unsigned long)win_data->vaddr);
268 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n", 303 DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
269 overlay->fb_width, overlay->crtc_width); 304 overlay->fb_width, overlay->crtc_width);
@@ -376,16 +411,16 @@ static void fimd_win_commit(struct device *dev)
376 writel(val, ctx->regs + SHADOWCON); 411 writel(val, ctx->regs + SHADOWCON);
377 412
378 /* buffer start address */ 413 /* buffer start address */
379 val = win_data->paddr; 414 val = (unsigned long)win_data->dma_addr;
380 writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); 415 writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
381 416
382 /* buffer end address */ 417 /* buffer end address */
383 size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3); 418 size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
384 val = win_data->paddr + size; 419 val = (unsigned long)(win_data->dma_addr + size);
385 writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); 420 writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
386 421
387 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n", 422 DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
388 (unsigned long)win_data->paddr, val, size); 423 (unsigned long)win_data->dma_addr, val, size);
389 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n", 424 DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
390 win_data->ovl_width, win_data->ovl_height); 425 win_data->ovl_width, win_data->ovl_height);
391 426
@@ -447,7 +482,6 @@ static void fimd_win_commit(struct device *dev)
447static void fimd_win_disable(struct device *dev) 482static void fimd_win_disable(struct device *dev)
448{ 483{
449 struct fimd_context *ctx = get_fimd_context(dev); 484 struct fimd_context *ctx = get_fimd_context(dev);
450 struct fimd_win_data *win_data;
451 int win = ctx->default_win; 485 int win = ctx->default_win;
452 u32 val; 486 u32 val;
453 487
@@ -456,8 +490,6 @@ static void fimd_win_disable(struct device *dev)
456 if (win < 0 || win > WINDOWS_NR) 490 if (win < 0 || win > WINDOWS_NR)
457 return; 491 return;
458 492
459 win_data = &ctx->win_data[win];
460
461 /* protect windows */ 493 /* protect windows */
462 val = readl(ctx->regs + SHADOWCON); 494 val = readl(ctx->regs + SHADOWCON);
463 val |= SHADOWCON_WINx_PROTECT(win); 495 val |= SHADOWCON_WINx_PROTECT(win);
@@ -528,6 +560,16 @@ static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
528 /* VSYNC interrupt */ 560 /* VSYNC interrupt */
529 writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1); 561 writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
530 562
563 /*
563	 * when vblank_disable_allowed is 1, manager->pipe can
564	 * still be -1 here: the disable callback does not turn
565	 * the vsync interrupt off immediately, so an interrupt
566	 * can arrive before the pipe is assigned. the timer
567	 * handler will disable it later.
569 */
570 if (manager->pipe == -1)
571 return IRQ_HANDLED;
572
531 drm_handle_vblank(drm_dev, manager->pipe); 573 drm_handle_vblank(drm_dev, manager->pipe);
532 fimd_finish_pageflip(drm_dev, manager->pipe); 574 fimd_finish_pageflip(drm_dev, manager->pipe);
533 575
@@ -548,13 +590,6 @@ static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
548 */ 590 */
549 drm_dev->irq_enabled = 1; 591 drm_dev->irq_enabled = 1;
550 592
551 /*
552 * with vblank_disable_allowed = 1, vblank interrupt will be disabled
553 * by drm timer once a current process gives up ownership of
554 * vblank event.(drm_vblank_put function was called)
555 */
556 drm_dev->vblank_disable_allowed = 1;
557
558 return 0; 593 return 0;
559} 594}
560 595
@@ -731,7 +766,7 @@ static int __devinit fimd_probe(struct platform_device *pdev)
731 subdrv->manager.pipe = -1; 766 subdrv->manager.pipe = -1;
732 subdrv->manager.ops = &fimd_manager_ops; 767 subdrv->manager.ops = &fimd_manager_ops;
733 subdrv->manager.overlay_ops = &fimd_overlay_ops; 768 subdrv->manager.overlay_ops = &fimd_overlay_ops;
734 subdrv->manager.display = &fimd_display; 769 subdrv->manager.display_ops = &fimd_display_ops;
735 subdrv->manager.dev = dev; 770 subdrv->manager.dev = dev;
736 771
737 platform_set_drvdata(pdev, ctx); 772 platform_set_drvdata(pdev, ctx);
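Aside: fimd_disable() above balances the vblank reference count before shutting DMA off. A hedged sketch of the underlying drm_vblank API (generic usage, not this driver's code; pipe is an int crtc index):

	/* take a reference; the first one enables the interrupt */
	if (drm_vblank_get(drm_dev, pipe) == 0) {
		/* ... consume or wait for the vblank event ... */

		/* drop the reference; with vblank_disable_allowed set,
		 * a drm timer turns the interrupt off some time later */
		drm_vblank_put(drm_dev, pipe);

		/* or force it off immediately, as fimd_disable() does */
		drm_vblank_off(drm_dev, pipe);
	}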
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index a8e7a88906ed..aba0fe47f7ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -62,40 +62,28 @@ static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
62 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT; 62 return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
63} 63}
64 64
65struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, 65static struct exynos_drm_gem_obj
66 struct drm_device *dev, unsigned int size, 66 *exynos_drm_gem_init(struct drm_device *drm_dev,
67 unsigned int *handle) 67 struct drm_file *file_priv, unsigned int *handle,
68 unsigned int size)
68{ 69{
69 struct exynos_drm_gem_obj *exynos_gem_obj; 70 struct exynos_drm_gem_obj *exynos_gem_obj;
70 struct exynos_drm_buf_entry *entry;
71 struct drm_gem_object *obj; 71 struct drm_gem_object *obj;
72 int ret; 72 int ret;
73 73
74 DRM_DEBUG_KMS("%s\n", __FILE__);
75
76 size = roundup(size, PAGE_SIZE);
77
78 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL); 74 exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
79 if (!exynos_gem_obj) { 75 if (!exynos_gem_obj) {
80 DRM_ERROR("failed to allocate exynos gem object.\n"); 76 DRM_ERROR("failed to allocate exynos gem object.\n");
81 return ERR_PTR(-ENOMEM); 77 return ERR_PTR(-ENOMEM);
82 } 78 }
83 79
84 /* allocate the new buffer object and memory region. */
85 entry = exynos_drm_buf_create(dev, size);
86 if (!entry) {
87 kfree(exynos_gem_obj);
88 return ERR_PTR(-ENOMEM);
89 }
90
91 exynos_gem_obj->entry = entry;
92
93 obj = &exynos_gem_obj->base; 80 obj = &exynos_gem_obj->base;
94 81
95 ret = drm_gem_object_init(dev, obj, size); 82 ret = drm_gem_object_init(drm_dev, obj, size);
96 if (ret < 0) { 83 if (ret < 0) {
97 DRM_ERROR("failed to initailize gem object.\n"); 84 DRM_ERROR("failed to initialize gem object.\n");
98 goto err_obj_init; 85 ret = -EINVAL;
86 goto err_object_init;
99 } 87 }
100 88
101 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp); 89 DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
@@ -127,24 +115,50 @@ err_handle_create:
127err_create_mmap_offset: 115err_create_mmap_offset:
128 drm_gem_object_release(obj); 116 drm_gem_object_release(obj);
129 117
130err_obj_init: 118err_object_init:
131 exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
132
133 kfree(exynos_gem_obj); 119 kfree(exynos_gem_obj);
134 120
135 return ERR_PTR(ret); 121 return ERR_PTR(ret);
136} 122}
137 123
124struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
125 struct drm_file *file_priv,
126 unsigned int *handle, unsigned long size)
127{
128
129 struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
130 struct exynos_drm_gem_buf *buffer;
131
132 size = roundup(size, PAGE_SIZE);
133
134 DRM_DEBUG_KMS("%s: size = 0x%lx\n", __FILE__, size);
135
136 buffer = exynos_drm_buf_create(dev, size);
137 if (IS_ERR(buffer)) {
138 return ERR_CAST(buffer);
139 }
140
141 exynos_gem_obj = exynos_drm_gem_init(dev, file_priv, handle, size);
142 if (IS_ERR(exynos_gem_obj)) {
143 exynos_drm_buf_destroy(dev, buffer);
144 return exynos_gem_obj;
145 }
146
147 exynos_gem_obj->buffer = buffer;
148
149 return exynos_gem_obj;
150}
151
138int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data, 152int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
139 struct drm_file *file_priv) 153 struct drm_file *file_priv)
140{ 154{
141 struct drm_exynos_gem_create *args = data; 155 struct drm_exynos_gem_create *args = data;
142 struct exynos_drm_gem_obj *exynos_gem_obj; 156 struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
143 157
144 DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size); 158 DRM_DEBUG_KMS("%s\n", __FILE__);
145 159
146 exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, 160 exynos_gem_obj = exynos_drm_gem_create(dev, file_priv,
147 &args->handle); 161 &args->handle, args->size);
148 if (IS_ERR(exynos_gem_obj)) 162 if (IS_ERR(exynos_gem_obj))
149 return PTR_ERR(exynos_gem_obj); 163 return PTR_ERR(exynos_gem_obj);
150 164
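Aside: with the split above, buffer allocation and GEM bookkeeping are independent steps inside exynos_drm_gem_create(). A short usage sketch of the new entry point (the handle variable and size value are illustrative only):

	unsigned int handle;
	struct exynos_drm_gem_obj *obj;

	/* size is rounded up to PAGE_SIZE internally */
	obj = exynos_drm_gem_create(dev, file_priv, &handle, 4096);
	if (IS_ERR(obj))
		return PTR_ERR(obj);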
@@ -175,7 +189,7 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
175{ 189{
176 struct drm_gem_object *obj = filp->private_data; 190 struct drm_gem_object *obj = filp->private_data;
177 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj); 191 struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
178 struct exynos_drm_buf_entry *entry; 192 struct exynos_drm_gem_buf *buffer;
179 unsigned long pfn, vm_size; 193 unsigned long pfn, vm_size;
180 194
181 DRM_DEBUG_KMS("%s\n", __FILE__); 195 DRM_DEBUG_KMS("%s\n", __FILE__);
@@ -187,20 +201,20 @@ static int exynos_drm_gem_mmap_buffer(struct file *filp,
187 201
188 vm_size = vma->vm_end - vma->vm_start; 202 vm_size = vma->vm_end - vma->vm_start;
189 /* 203 /*
190	 * an entry contains information about physically contiguous memory	204	 * a buffer contains information about physically contiguous memory
191 * allocated by user request or at framebuffer creation. 205 * allocated by user request or at framebuffer creation.
192 */ 206 */
193 entry = exynos_gem_obj->entry; 207 buffer = exynos_gem_obj->buffer;
194 208
195 /* check if user-requested size is valid. */ 209 /* check if user-requested size is valid. */
196 if (vm_size > entry->size) 210 if (vm_size > buffer->size)
197 return -EINVAL; 211 return -EINVAL;
198 212
199 /* 213 /*
200 * get page frame number to physical memory to be mapped 214 * get page frame number to physical memory to be mapped
201 * to user space. 215 * to user space.
202 */ 216 */
203 pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT; 217 pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >> PAGE_SHIFT;
204 218
205 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn); 219 DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
206 220
@@ -281,7 +295,7 @@ void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
281 295
282 exynos_gem_obj = to_exynos_gem_obj(gem_obj); 296 exynos_gem_obj = to_exynos_gem_obj(gem_obj);
283 297
284 exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry); 298 exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->buffer);
285 299
286 kfree(exynos_gem_obj); 300 kfree(exynos_gem_obj);
287} 301}
@@ -302,8 +316,8 @@ int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
302 args->pitch = args->width * args->bpp >> 3; 316 args->pitch = args->width * args->bpp >> 3;
303 args->size = args->pitch * args->height; 317 args->size = args->pitch * args->height;
304 318
305 exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size, 319 exynos_gem_obj = exynos_drm_gem_create(dev, file_priv, &args->handle,
306 &args->handle); 320 args->size);
307 if (IS_ERR(exynos_gem_obj)) 321 if (IS_ERR(exynos_gem_obj))
308 return PTR_ERR(exynos_gem_obj); 322 return PTR_ERR(exynos_gem_obj);
309 323
@@ -360,7 +374,8 @@ int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
360 374
361 mutex_lock(&dev->struct_mutex); 375 mutex_lock(&dev->struct_mutex);
362 376
363 pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset; 377 pfn = (((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
378 PAGE_SHIFT) + page_offset;
364 379
365 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn); 380 ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
366 381
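Aside: both the mmap and fault paths above turn the DMA address into a page frame number before handing it to the mm layer; the explicit cast guards against dma_addr_t being wider than unsigned long. Distilled (a sketch of the conversion, assuming physically contiguous memory as this driver allocates):

	unsigned long pfn;

	/* linear buffer: page_offset pages in is just pfn + page_offset */
	pfn = ((unsigned long)buffer->dma_addr >> PAGE_SHIFT) + page_offset;
	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);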
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index e5fc0148277b..ef8797334e6d 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -30,13 +30,29 @@
30 struct exynos_drm_gem_obj, base) 30 struct exynos_drm_gem_obj, base)
31 31
32/* 32/*
33 * exynos drm gem buffer structure.
34 *
35 * @kvaddr: kernel virtual address of the allocated memory region.
36 * @dma_addr: bus address (accessed by dma) of the allocated memory region.
37 *	- this is a physical address without an IOMMU and
38 *	a device address with an IOMMU.
39 * @size: size of the allocated memory region.
40 */
41struct exynos_drm_gem_buf {
42 void __iomem *kvaddr;
43 dma_addr_t dma_addr;
44 unsigned long size;
45};
46
47/*
33 * exynos drm buffer structure. 48 * exynos drm buffer structure.
34 * 49 *
35 * @base: a gem object. 50 * @base: a gem object.
36 * - a new handle to this gem object would be created 51 * - a new handle to this gem object would be created
37 * by drm_gem_handle_create(). 52 * by drm_gem_handle_create().
38 * @entry: pointer to exynos drm buffer entry object.	53 * @buffer: a pointer to an exynos_drm_gem_buf object.
39 *	- containing the information to physically	54 *	- contains the information about the memory
	55 *	region allocated by user request or at framebuffer creation.
40 * continuous memory region allocated by user request 56 * continuous memory region allocated by user request
41 * or at framebuffer creation. 57 * or at framebuffer creation.
42 * 58 *
@@ -45,13 +61,13 @@
45 */ 61 */
46struct exynos_drm_gem_obj { 62struct exynos_drm_gem_obj {
47 struct drm_gem_object base; 63 struct drm_gem_object base;
48 struct exynos_drm_buf_entry *entry; 64 struct exynos_drm_gem_buf *buffer;
49}; 65};
50 66
51/* create a new buffer and get a new gem handle. */ 67/* create a new buffer and get a new gem handle. */
52struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv, 68struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
53 struct drm_device *dev, unsigned int size, 69 struct drm_file *file_priv,
54 unsigned int *handle); 70 unsigned int *handle, unsigned long size);
55 71
56/* 72/*
57 * request gem object creation and buffer allocation as the size 73 * request gem object creation and buffer allocation as the size
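Aside: exynos_drm_buf_create() itself is outside this hunk; given the kvaddr/dma_addr pair above, it plausibly reduces to one contiguous DMA allocation. A heavily hedged sketch only — the real implementation lives in exynos_drm_buf.c and may well differ:

	#include <linux/dma-mapping.h>
	#include <linux/slab.h>

	struct exynos_drm_gem_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	/* one physically contiguous, write-combined region; fills dma_addr */
	buf->kvaddr = (void __iomem *)dma_alloc_writecombine(dev->dev, size,
						&buf->dma_addr, GFP_KERNEL);
	if (!buf->kvaddr) {
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}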
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d14b44e13f51..d09a6e02dc95 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -636,11 +636,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
636 struct drm_device *dev = node->minor->dev; 636 struct drm_device *dev = node->minor->dev;
637 drm_i915_private_t *dev_priv = dev->dev_private; 637 drm_i915_private_t *dev_priv = dev->dev_private;
638 struct intel_ring_buffer *ring; 638 struct intel_ring_buffer *ring;
639 int ret;
639 640
640 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data]; 641 ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
641 if (ring->size == 0) 642 if (ring->size == 0)
642 return 0; 643 return 0;
643 644
645 ret = mutex_lock_interruptible(&dev->struct_mutex);
646 if (ret)
647 return ret;
648
644 seq_printf(m, "Ring %s:\n", ring->name); 649 seq_printf(m, "Ring %s:\n", ring->name);
645 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR); 650 seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
646 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR); 651 seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
@@ -654,6 +659,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
654 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring)); 659 seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
655 seq_printf(m, " Start : %08x\n", I915_READ_START(ring)); 660 seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
656 661
662 mutex_unlock(&dev->struct_mutex);
663
657 return 0; 664 return 0;
658} 665}
659 666
@@ -842,7 +849,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
842 struct drm_info_node *node = (struct drm_info_node *) m->private; 849 struct drm_info_node *node = (struct drm_info_node *) m->private;
843 struct drm_device *dev = node->minor->dev; 850 struct drm_device *dev = node->minor->dev;
844 drm_i915_private_t *dev_priv = dev->dev_private; 851 drm_i915_private_t *dev_priv = dev->dev_private;
845 u16 crstanddelay = I915_READ16(CRSTANDVID); 852 u16 crstanddelay;
853 int ret;
854
855 ret = mutex_lock_interruptible(&dev->struct_mutex);
856 if (ret)
857 return ret;
858
859 crstanddelay = I915_READ16(CRSTANDVID);
860
861 mutex_unlock(&dev->struct_mutex);
846 862
847 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f)); 863 seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
848 864
@@ -940,7 +956,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
940 struct drm_device *dev = node->minor->dev; 956 struct drm_device *dev = node->minor->dev;
941 drm_i915_private_t *dev_priv = dev->dev_private; 957 drm_i915_private_t *dev_priv = dev->dev_private;
942 u32 delayfreq; 958 u32 delayfreq;
943 int i; 959 int ret, i;
960
961 ret = mutex_lock_interruptible(&dev->struct_mutex);
962 if (ret)
963 return ret;
944 964
945 for (i = 0; i < 16; i++) { 965 for (i = 0; i < 16; i++) {
946 delayfreq = I915_READ(PXVFREQ_BASE + i * 4); 966 delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -948,6 +968,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
948 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT); 968 (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
949 } 969 }
950 970
971 mutex_unlock(&dev->struct_mutex);
972
951 return 0; 973 return 0;
952} 974}
953 975
@@ -962,13 +984,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
962 struct drm_device *dev = node->minor->dev; 984 struct drm_device *dev = node->minor->dev;
963 drm_i915_private_t *dev_priv = dev->dev_private; 985 drm_i915_private_t *dev_priv = dev->dev_private;
964 u32 inttoext; 986 u32 inttoext;
965 int i; 987 int ret, i;
988
989 ret = mutex_lock_interruptible(&dev->struct_mutex);
990 if (ret)
991 return ret;
966 992
967 for (i = 1; i <= 32; i++) { 993 for (i = 1; i <= 32; i++) {
968 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4); 994 inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
969 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext); 995 seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
970 } 996 }
971 997
998 mutex_unlock(&dev->struct_mutex);
999
972 return 0; 1000 return 0;
973} 1001}
974 1002
@@ -977,9 +1005,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
977 struct drm_info_node *node = (struct drm_info_node *) m->private; 1005 struct drm_info_node *node = (struct drm_info_node *) m->private;
978 struct drm_device *dev = node->minor->dev; 1006 struct drm_device *dev = node->minor->dev;
979 drm_i915_private_t *dev_priv = dev->dev_private; 1007 drm_i915_private_t *dev_priv = dev->dev_private;
980 u32 rgvmodectl = I915_READ(MEMMODECTL); 1008 u32 rgvmodectl, rstdbyctl;
981 u32 rstdbyctl = I915_READ(RSTDBYCTL); 1009 u16 crstandvid;
982 u16 crstandvid = I915_READ16(CRSTANDVID); 1010 int ret;
1011
1012 ret = mutex_lock_interruptible(&dev->struct_mutex);
1013 if (ret)
1014 return ret;
1015
1016 rgvmodectl = I915_READ(MEMMODECTL);
1017 rstdbyctl = I915_READ(RSTDBYCTL);
1018 crstandvid = I915_READ16(CRSTANDVID);
1019
1020 mutex_unlock(&dev->struct_mutex);
983 1021
984 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ? 1022 seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
985 "yes" : "no"); 1023 "yes" : "no");
@@ -1167,9 +1205,16 @@ static int i915_gfxec(struct seq_file *m, void *unused)
1167 struct drm_info_node *node = (struct drm_info_node *) m->private; 1205 struct drm_info_node *node = (struct drm_info_node *) m->private;
1168 struct drm_device *dev = node->minor->dev; 1206 struct drm_device *dev = node->minor->dev;
1169 drm_i915_private_t *dev_priv = dev->dev_private; 1207 drm_i915_private_t *dev_priv = dev->dev_private;
1208 int ret;
1209
1210 ret = mutex_lock_interruptible(&dev->struct_mutex);
1211 if (ret)
1212 return ret;
1170 1213
1171 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4)); 1214 seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
1172 1215
1216 mutex_unlock(&dev->struct_mutex);
1217
1173 return 0; 1218 return 0;
1174} 1219}
1175 1220
@@ -1506,7 +1551,10 @@ drm_add_fake_info_node(struct drm_minor *minor,
1506 node->minor = minor; 1551 node->minor = minor;
1507 node->dent = ent; 1552 node->dent = ent;
1508 node->info_ent = (void *) key; 1553 node->info_ent = (void *) key;
1509 list_add(&node->list, &minor->debugfs_nodes.list); 1554
1555 mutex_lock(&minor->debugfs_lock);
1556 list_add(&node->list, &minor->debugfs_list);
1557 mutex_unlock(&minor->debugfs_lock);
1510 1558
1511 return 0; 1559 return 0;
1512} 1560}
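Aside: every debugfs reader converted above now follows one pattern: take struct_mutex interruptibly, snapshot the registers, drop the lock, then format. Distilled into a generic show function (SOME_REG is a placeholder, not a real register):

	static int example_show(struct seq_file *m, void *unused)
	{
		struct drm_info_node *node = m->private;
		struct drm_device *dev = node->minor->dev;
		drm_i915_private_t *dev_priv = dev->dev_private;
		u32 snapshot;
		int ret;

		ret = mutex_lock_interruptible(&dev->struct_mutex);
		if (ret)
			return ret;	/* -EINTR if a signal arrived while waiting */

		snapshot = I915_READ(SOME_REG);	/* hypothetical register */
		mutex_unlock(&dev->struct_mutex);

		seq_printf(m, "value: 0x%08x\n", snapshot);
		return 0;
	}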
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index cc531bb59c26..15bfa9145d2b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -68,7 +68,7 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
68MODULE_PARM_DESC(i915_enable_rc6, 68MODULE_PARM_DESC(i915_enable_rc6,
69 "Enable power-saving render C-state 6 (default: true)"); 69 "Enable power-saving render C-state 6 (default: true)");
70 70
71unsigned int i915_enable_fbc __read_mostly = -1; 71int i915_enable_fbc __read_mostly = -1;
72module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); 72module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
73MODULE_PARM_DESC(i915_enable_fbc, 73MODULE_PARM_DESC(i915_enable_fbc,
74 "Enable frame buffer compression for power savings " 74 "Enable frame buffer compression for power savings "
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock,
80 "Use panel (LVDS/eDP) downclocking for power savings " 80 "Use panel (LVDS/eDP) downclocking for power savings "
81 "(default: false)"); 81 "(default: false)");
82 82
83unsigned int i915_panel_use_ssc __read_mostly = -1; 83int i915_panel_use_ssc __read_mostly = -1;
84module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); 84module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
85MODULE_PARM_DESC(lvds_use_ssc, 85MODULE_PARM_DESC(lvds_use_ssc,
86 "Use Spread Spectrum Clock with panels [LVDS/eDP] " 86 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
@@ -107,7 +107,7 @@ static struct drm_driver driver;
107extern int intel_agp_enabled; 107extern int intel_agp_enabled;
108 108
109#define INTEL_VGA_DEVICE(id, info) { \ 109#define INTEL_VGA_DEVICE(id, info) { \
110 .class = PCI_CLASS_DISPLAY_VGA << 8, \ 110 .class = PCI_BASE_CLASS_DISPLAY << 16, \
111 .class_mask = 0xff0000, \ 111 .class_mask = 0xff0000, \
112 .vendor = 0x8086, \ 112 .vendor = 0x8086, \
113 .device = id, \ 113 .device = id, \
@@ -789,8 +789,8 @@ static struct vm_operations_struct i915_gem_vm_ops = {
789}; 789};
790 790
791static struct drm_driver driver = { 791static struct drm_driver driver = {
792 /* don't use mtrr's here, the Xserver or user space app should 792 /* Don't use MTRRs here; the Xserver or userspace app should
793 * deal with them for intel hardware. 793 * deal with them for Intel hardware.
794 */ 794 */
795 .driver_features = 795 .driver_features =
796 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ 796 DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 06a37f4fd74b..4a9c1b979804 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -126,6 +126,9 @@ struct drm_i915_master_private {
126 struct _drm_i915_sarea *sarea_priv; 126 struct _drm_i915_sarea *sarea_priv;
127}; 127};
128#define I915_FENCE_REG_NONE -1 128#define I915_FENCE_REG_NONE -1
129#define I915_MAX_NUM_FENCES 16
130/* 16 fences + sign bit for FENCE_REG_NONE */
131#define I915_MAX_NUM_FENCE_BITS 5
129 132
130struct drm_i915_fence_reg { 133struct drm_i915_fence_reg {
131 struct list_head lru_list; 134 struct list_head lru_list;
@@ -168,7 +171,7 @@ struct drm_i915_error_state {
168 u32 instdone1; 171 u32 instdone1;
169 u32 seqno; 172 u32 seqno;
170 u64 bbaddr; 173 u64 bbaddr;
171 u64 fence[16]; 174 u64 fence[I915_MAX_NUM_FENCES];
172 struct timeval time; 175 struct timeval time;
173 struct drm_i915_error_object { 176 struct drm_i915_error_object {
174 int page_count; 177 int page_count;
@@ -182,7 +185,7 @@ struct drm_i915_error_state {
182 u32 gtt_offset; 185 u32 gtt_offset;
183 u32 read_domains; 186 u32 read_domains;
184 u32 write_domain; 187 u32 write_domain;
185 s32 fence_reg:5; 188 s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
186 s32 pinned:2; 189 s32 pinned:2;
187 u32 tiling:2; 190 u32 tiling:2;
188 u32 dirty:1; 191 u32 dirty:1;
@@ -375,7 +378,7 @@ typedef struct drm_i915_private {
375 struct notifier_block lid_notifier; 378 struct notifier_block lid_notifier;
376 379
377 int crt_ddc_pin; 380 int crt_ddc_pin;
378 struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */ 381 struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
379 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */ 382 int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
380 int num_fence_regs; /* 8 on pre-965, 16 otherwise */ 383 int num_fence_regs; /* 8 on pre-965, 16 otherwise */
381 384
@@ -506,7 +509,7 @@ typedef struct drm_i915_private {
506 u8 saveAR[21]; 509 u8 saveAR[21];
507 u8 saveDACMASK; 510 u8 saveDACMASK;
508 u8 saveCR[37]; 511 u8 saveCR[37];
509 uint64_t saveFENCE[16]; 512 uint64_t saveFENCE[I915_MAX_NUM_FENCES];
510 u32 saveCURACNTR; 513 u32 saveCURACNTR;
511 u32 saveCURAPOS; 514 u32 saveCURAPOS;
512 u32 saveCURABASE; 515 u32 saveCURABASE;
@@ -777,10 +780,8 @@ struct drm_i915_gem_object {
777 * Fence register bits (if any) for this object. Will be set 780 * Fence register bits (if any) for this object. Will be set
778 * as needed when mapped into the GTT. 781 * as needed when mapped into the GTT.
779 * Protected by dev->struct_mutex. 782 * Protected by dev->struct_mutex.
780 *
781 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
782 */ 783 */
783 signed int fence_reg:5; 784 signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
784 785
785 /** 786 /**
786 * Advice: are the backing pages purgeable? 787 * Advice: are the backing pages purgeable?
@@ -999,10 +1000,10 @@ extern int i915_panel_ignore_lid __read_mostly;
999extern unsigned int i915_powersave __read_mostly; 1000extern unsigned int i915_powersave __read_mostly;
1000extern unsigned int i915_semaphores __read_mostly; 1001extern unsigned int i915_semaphores __read_mostly;
1001extern unsigned int i915_lvds_downclock __read_mostly; 1002extern unsigned int i915_lvds_downclock __read_mostly;
1002extern unsigned int i915_panel_use_ssc __read_mostly; 1003extern int i915_panel_use_ssc __read_mostly;
1003extern int i915_vbt_sdvo_panel_type __read_mostly; 1004extern int i915_vbt_sdvo_panel_type __read_mostly;
1004extern unsigned int i915_enable_rc6 __read_mostly; 1005extern unsigned int i915_enable_rc6 __read_mostly;
1005extern unsigned int i915_enable_fbc __read_mostly; 1006extern int i915_enable_fbc __read_mostly;
1006extern bool i915_enable_hangcheck __read_mostly; 1007extern bool i915_enable_hangcheck __read_mostly;
1007 1008
1008extern int i915_suspend(struct drm_device *dev, pm_message_t state); 1009extern int i915_suspend(struct drm_device *dev, pm_message_t state);
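Aside: the 5-bit width is plain two's complement arithmetic: a signed 5-bit field spans -16..15, which covers fence indices 0..15 plus I915_FENCE_REG_NONE (-1). A compile-time self-check one could add (not in the patch):

	/* the 2^(bits-1) non-negative values must cover every fence index */
	BUILD_BUG_ON(I915_MAX_NUM_FENCES > (1 << (I915_MAX_NUM_FENCE_BITS - 1)));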
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 6651c36b6e8a..60ff1b63b568 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1396,7 +1396,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
1396 1396
1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) { 1397 if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
1398 ret = -E2BIG; 1398 ret = -E2BIG;
1399 goto unlock; 1399 goto out;
1400 } 1400 }
1401 1401
1402 if (obj->madv != I915_MADV_WILLNEED) { 1402 if (obj->madv != I915_MADV_WILLNEED) {
@@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
1745 struct drm_i915_private *dev_priv = dev->dev_private; 1745 struct drm_i915_private *dev_priv = dev->dev_private;
1746 int i; 1746 int i;
1747 1747
1748 for (i = 0; i < 16; i++) { 1748 for (i = 0; i < dev_priv->num_fence_regs; i++) {
1749 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i]; 1749 struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
1750 struct drm_i915_gem_object *obj = reg->obj; 1750 struct drm_i915_gem_object *obj = reg->obj;
1751 1751
@@ -2026,8 +2026,13 @@ i915_wait_request(struct intel_ring_buffer *ring,
2026 * to handle this, the waiter on a request often wants an associated 2026 * to handle this, the waiter on a request often wants an associated
2027 * buffer to have made it to the inactive list, and we would need 2027 * buffer to have made it to the inactive list, and we would need
2028 * a separate wait queue to handle that. 2028 * a separate wait queue to handle that.
2029 *
2030 * To avoid a recursion with the ilk VT-d workaround (that calls
2031 * gpu_idle when unbinding objects with interruptible==false) don't
2032 * retire requests in that case (because it might call unbind if the
2033 * active list holds the last reference to the object).
2029 */ 2034 */
2030 if (ret == 0) 2035 if (ret == 0 && dev_priv->mm.interruptible)
2031 i915_gem_retire_requests_ring(ring); 2036 i915_gem_retire_requests_ring(ring);
2032 2037
2033 return ret; 2038 return ret;
@@ -3512,9 +3517,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
3512 * so emit a request to do so. 3517 * so emit a request to do so.
3513 */ 3518 */
3514 request = kzalloc(sizeof(*request), GFP_KERNEL); 3519 request = kzalloc(sizeof(*request), GFP_KERNEL);
3515 if (request) 3520 if (request) {
3516 ret = i915_add_request(obj->ring, NULL, request); 3521 ret = i915_add_request(obj->ring, NULL, request);
3517 else 3522 if (ret)
3523 kfree(request);
3524 } else
3518 ret = -ENOMEM; 3525 ret = -ENOMEM;
3519 } 3526 }
3520 3527
@@ -3613,7 +3620,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
3613 obj->base.write_domain = I915_GEM_DOMAIN_CPU; 3620 obj->base.write_domain = I915_GEM_DOMAIN_CPU;
3614 obj->base.read_domains = I915_GEM_DOMAIN_CPU; 3621 obj->base.read_domains = I915_GEM_DOMAIN_CPU;
3615 3622
3616 if (IS_GEN6(dev)) { 3623 if (IS_GEN6(dev) || IS_GEN7(dev)) {
3617 /* On Gen6, we can have the GPU use the LLC (the CPU 3624 /* On Gen6, we can have the GPU use the LLC (the CPU
3618 * cache) for about a 10% performance improvement 3625 * cache) for about a 10% performance improvement
3619 * compared to uncached. Graphics requests other than 3626 * compared to uncached. Graphics requests other than
@@ -3877,7 +3884,7 @@ i915_gem_load(struct drm_device *dev)
3877 INIT_LIST_HEAD(&dev_priv->mm.gtt_list); 3884 INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
3878 for (i = 0; i < I915_NUM_RINGS; i++) 3885 for (i = 0; i < I915_NUM_RINGS; i++)
3879 init_ring_lists(&dev_priv->ring[i]); 3886 init_ring_lists(&dev_priv->ring[i]);
3880 for (i = 0; i < 16; i++) 3887 for (i = 0; i < I915_MAX_NUM_FENCES; i++)
3881 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list); 3888 INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
3882 INIT_DELAYED_WORK(&dev_priv->mm.retire_work, 3889 INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
3883 i915_gem_retire_work_handler); 3890 i915_gem_retire_work_handler);
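Aside: the busy_ioctl change above fixes a leak by making request ownership explicit: on success the ring owns the request, on failure the caller must free it. The shape of the fix, isolated (names taken from the hunk):

	request = kzalloc(sizeof(*request), GFP_KERNEL);
	if (request) {
		ret = i915_add_request(obj->ring, NULL, request);
		if (ret)
			kfree(request);	/* ownership transfers only on success */
	} else
		ret = -ENOMEM;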
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9ee2729fe5c6..b40004b55977 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
824 824
825 /* Fences */ 825 /* Fences */
826 switch (INTEL_INFO(dev)->gen) { 826 switch (INTEL_INFO(dev)->gen) {
827 case 7:
827 case 6: 828 case 6:
828 for (i = 0; i < 16; i++) 829 for (i = 0; i < 16; i++)
829 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 830 error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5a09416e611f..b080cc824001 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,12 +1553,21 @@
1553 */ 1553 */
1554#define PP_READY (1 << 30) 1554#define PP_READY (1 << 30)
1555#define PP_SEQUENCE_NONE (0 << 28) 1555#define PP_SEQUENCE_NONE (0 << 28)
1556#define PP_SEQUENCE_ON (1 << 28) 1556#define PP_SEQUENCE_POWER_UP (1 << 28)
1557#define PP_SEQUENCE_OFF (2 << 28) 1557#define PP_SEQUENCE_POWER_DOWN (2 << 28)
1558#define PP_SEQUENCE_MASK 0x30000000 1558#define PP_SEQUENCE_MASK (3 << 28)
1559#define PP_SEQUENCE_SHIFT 28
1559#define PP_CYCLE_DELAY_ACTIVE (1 << 27) 1560#define PP_CYCLE_DELAY_ACTIVE (1 << 27)
1560#define PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
1561#define PP_SEQUENCE_STATE_MASK 0x0000000f 1561#define PP_SEQUENCE_STATE_MASK 0x0000000f
1562#define PP_SEQUENCE_STATE_OFF_IDLE (0x0 << 0)
1563#define PP_SEQUENCE_STATE_OFF_S0_1 (0x1 << 0)
1564#define PP_SEQUENCE_STATE_OFF_S0_2 (0x2 << 0)
1565#define PP_SEQUENCE_STATE_OFF_S0_3 (0x3 << 0)
1566#define PP_SEQUENCE_STATE_ON_IDLE (0x8 << 0)
1567#define PP_SEQUENCE_STATE_ON_S1_0 (0x9 << 0)
1568#define PP_SEQUENCE_STATE_ON_S1_2 (0xa << 0)
1569#define PP_SEQUENCE_STATE_ON_S1_3 (0xb << 0)
1570#define PP_SEQUENCE_STATE_RESET (0xf << 0)
1562#define PP_CONTROL 0x61204 1571#define PP_CONTROL 0x61204
1563#define POWER_TARGET_ON (1 << 0) 1572#define POWER_TARGET_ON (1 << 0)
1564#define PP_ON_DELAYS 0x61208 1573#define PP_ON_DELAYS 0x61208
@@ -3444,6 +3453,10 @@
3444#define GT_FIFO_FREE_ENTRIES 0x120008 3453#define GT_FIFO_FREE_ENTRIES 0x120008
3445#define GT_FIFO_NUM_RESERVED_ENTRIES 20 3454#define GT_FIFO_NUM_RESERVED_ENTRIES 20
3446 3455
3456#define GEN6_UCGCTL2 0x9404
3457# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE (1 << 12)
3458# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE (1 << 11)
3459
3447#define GEN6_RPNSWREQ 0xA008 3460#define GEN6_RPNSWREQ 0xA008
3448#define GEN6_TURBO_DISABLE (1<<31) 3461#define GEN6_TURBO_DISABLE (1<<31)
3449#define GEN6_FREQUENCY(x) ((x)<<25) 3462#define GEN6_FREQUENCY(x) ((x)<<25)
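Aside: the new PP_SEQUENCE_SHIFT pairs with PP_SEQUENCE_MASK for the usual mask-then-shift field extraction. For example (a sketch, with dev_priv in scope as usual):

	u32 status = I915_READ(PCH_PP_STATUS);
	u32 seq = (status & PP_SEQUENCE_MASK) >> PP_SEQUENCE_SHIFT;

	/* seq is now 0 (none), 1 (power up) or 2 (power down) */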
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index f8f602d76650..7886e4fb60e3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
370 370
371 /* Fences */ 371 /* Fences */
372 switch (INTEL_INFO(dev)->gen) { 372 switch (INTEL_INFO(dev)->gen) {
373 case 7:
373 case 6: 374 case 6:
374 for (i = 0; i < 16; i++) 375 for (i = 0; i < 16; i++)
375 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8)); 376 dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
@@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
404 405
405 /* Fences */ 406 /* Fences */
406 switch (INTEL_INFO(dev)->gen) { 407 switch (INTEL_INFO(dev)->gen) {
408 case 7:
407 case 6: 409 case 6:
408 for (i = 0; i < 16; i++) 410 for (i = 0; i < 16; i++)
409 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]); 411 I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 981b1f1c04d8..e77a863a3833 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
2933 2933
2934 /* For PCH DP, enable TRANS_DP_CTL */ 2934 /* For PCH DP, enable TRANS_DP_CTL */
2935 if (HAS_PCH_CPT(dev) && 2935 if (HAS_PCH_CPT(dev) &&
2936 intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) { 2936 (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
2937 intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
2937 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5; 2938 u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
2938 reg = TRANS_DP_CTL(pipe); 2939 reg = TRANS_DP_CTL(pipe);
2939 temp = I915_READ(reg); 2940 temp = I915_READ(reg);
@@ -4711,7 +4712,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4711 lvds_bpc = 6; 4712 lvds_bpc = 6;
4712 4713
4713 if (lvds_bpc < display_bpc) { 4714 if (lvds_bpc < display_bpc) {
4714 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc); 4715 DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
4715 display_bpc = lvds_bpc; 4716 display_bpc = lvds_bpc;
4716 } 4717 }
4717 continue; 4718 continue;
@@ -4722,7 +4723,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4722 unsigned int edp_bpc = dev_priv->edp.bpp / 3; 4723 unsigned int edp_bpc = dev_priv->edp.bpp / 3;
4723 4724
4724 if (edp_bpc < display_bpc) { 4725 if (edp_bpc < display_bpc) {
4725 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc); 4726 DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
4726 display_bpc = edp_bpc; 4727 display_bpc = edp_bpc;
4727 } 4728 }
4728 continue; 4729 continue;
@@ -4737,7 +4738,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4737 /* Don't use an invalid EDID bpc value */ 4738 /* Don't use an invalid EDID bpc value */
4738 if (connector->display_info.bpc && 4739 if (connector->display_info.bpc &&
4739 connector->display_info.bpc < display_bpc) { 4740 connector->display_info.bpc < display_bpc) {
4740 DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc); 4741 DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
4741 display_bpc = connector->display_info.bpc; 4742 display_bpc = connector->display_info.bpc;
4742 } 4743 }
4743 } 4744 }
@@ -4748,10 +4749,10 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4748 */ 4749 */
4749 if (intel_encoder->type == INTEL_OUTPUT_HDMI) { 4750 if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
4750 if (display_bpc > 8 && display_bpc < 12) { 4751 if (display_bpc > 8 && display_bpc < 12) {
4751 DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n"); 4752 DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
4752 display_bpc = 12; 4753 display_bpc = 12;
4753 } else { 4754 } else {
4754 DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n"); 4755 DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
4755 display_bpc = 8; 4756 display_bpc = 8;
4756 } 4757 }
4757 } 4758 }
@@ -4789,8 +4790,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
4789 4790
4790 display_bpc = min(display_bpc, bpc); 4791 display_bpc = min(display_bpc, bpc);
4791 4792
4792 DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n", 4793 DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
4793 bpc, display_bpc); 4794 bpc, display_bpc);
4794 4795
4795 *pipe_bpp = display_bpc * 3; 4796 *pipe_bpp = display_bpc * 3;
4796 4797
@@ -5671,7 +5672,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
5671 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK; 5672 pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
5672 if ((is_lvds && dev_priv->lvds_dither) || dither) { 5673 if ((is_lvds && dev_priv->lvds_dither) || dither) {
5673 pipeconf |= PIPECONF_DITHER_EN; 5674 pipeconf |= PIPECONF_DITHER_EN;
5674 pipeconf |= PIPECONF_DITHER_TYPE_ST1; 5675 pipeconf |= PIPECONF_DITHER_TYPE_SP;
5675 } 5676 }
5676 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) { 5677 if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
5677 intel_dp_set_m_n(crtc, mode, adjusted_mode); 5678 intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -8148,6 +8149,20 @@ static void gen6_init_clock_gating(struct drm_device *dev)
8148 I915_WRITE(WM2_LP_ILK, 0); 8149 I915_WRITE(WM2_LP_ILK, 0);
8149 I915_WRITE(WM1_LP_ILK, 0); 8150 I915_WRITE(WM1_LP_ILK, 0);
8150 8151
8152 /* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
8153 * gating disable must be set. Failure to set it results in
8154 * flickering pixels due to Z write ordering failures after
8155 * some amount of runtime in the Mesa "fire" demo, and Unigine
8156 * Sanctuary and Tropics, and apparently anything else with
8157 * alpha test or pixel discard.
8158 *
8159 * According to the spec, bit 11 (RCCUNIT) must also be set,
8160 * but we didn't debug actual testcases to find it out.
8161 */
8162 I915_WRITE(GEN6_UCGCTL2,
8163 GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
8164 GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
8165
8151 /* 8166 /*
8152 * According to the spec the following bits should be 8167 * According to the spec the following bits should be
8153 * set in order to enable memory self-refresh and fbc: 8168 * set in order to enable memory self-refresh and fbc:
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09b318b0227f..4d0358fad937 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,7 +59,6 @@ struct intel_dp {
59 struct i2c_algo_dp_aux_data algo; 59 struct i2c_algo_dp_aux_data algo;
60 bool is_pch_edp; 60 bool is_pch_edp;
61 uint8_t train_set[4]; 61 uint8_t train_set[4];
62 uint8_t link_status[DP_LINK_STATUS_SIZE];
63 int panel_power_up_delay; 62 int panel_power_up_delay;
64 int panel_power_down_delay; 63 int panel_power_down_delay;
65 int panel_power_cycle_delay; 64 int panel_power_cycle_delay;
@@ -68,7 +67,6 @@ struct intel_dp {
68 struct drm_display_mode *panel_fixed_mode; /* for eDP */ 67 struct drm_display_mode *panel_fixed_mode; /* for eDP */
69 struct delayed_work panel_vdd_work; 68 struct delayed_work panel_vdd_work;
70 bool want_panel_vdd; 69 bool want_panel_vdd;
71 unsigned long panel_off_jiffies;
72}; 70};
73 71
74/** 72/**
@@ -157,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
157static int 155static int
158intel_dp_max_lane_count(struct intel_dp *intel_dp) 156intel_dp_max_lane_count(struct intel_dp *intel_dp)
159{ 157{
160 int max_lane_count = 4; 158 int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
161 159 switch (max_lane_count) {
162 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) { 160 case 1: case 2: case 4:
163 max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f; 161 break;
164 switch (max_lane_count) { 162 default:
165 case 1: case 2: case 4: 163 max_lane_count = 4;
166 break;
167 default:
168 max_lane_count = 4;
169 }
170 } 164 }
171 return max_lane_count; 165 return max_lane_count;
172} 166}
@@ -768,12 +762,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
768 continue; 762 continue;
769 763
770 intel_dp = enc_to_intel_dp(encoder); 764 intel_dp = enc_to_intel_dp(encoder);
771 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) { 765 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
766 intel_dp->base.type == INTEL_OUTPUT_EDP)
767 {
772 lane_count = intel_dp->lane_count; 768 lane_count = intel_dp->lane_count;
773 break; 769 break;
774 } else if (is_edp(intel_dp)) {
775 lane_count = dev_priv->edp.lanes;
776 break;
777 } 770 }
778 } 771 }
779 772
@@ -810,6 +803,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
810 struct drm_display_mode *adjusted_mode) 803 struct drm_display_mode *adjusted_mode)
811{ 804{
812 struct drm_device *dev = encoder->dev; 805 struct drm_device *dev = encoder->dev;
806 struct drm_i915_private *dev_priv = dev->dev_private;
813 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 807 struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
814 struct drm_crtc *crtc = intel_dp->base.base.crtc; 808 struct drm_crtc *crtc = intel_dp->base.base.crtc;
815 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 809 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -822,18 +816,31 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
822 ironlake_edp_pll_off(encoder); 816 ironlake_edp_pll_off(encoder);
823 } 817 }
824 818
825 intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 819 /*
826 intel_dp->DP |= intel_dp->color_range; 820 * There are three kinds of DP registers:
821 *
822 * IBX PCH
823 * CPU
824 * CPT PCH
825 *
826 * IBX PCH and CPU are the same for almost everything,
827 * except that the CPU DP PLL is configured in this
828 * register
829 *
830 * CPT PCH is quite different, having many bits moved
831 * to the TRANS_DP_CTL register instead. That
832 * configuration happens (oddly) in ironlake_pch_enable
833 */
827 834
828 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 835 /* Preserve the BIOS-computed detected bit. This is
829 intel_dp->DP |= DP_SYNC_HS_HIGH; 836 * supposed to be read-only.
830 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 837 */
831 intel_dp->DP |= DP_SYNC_VS_HIGH; 838 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
839 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
832 840
833 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp)) 841 /* Handle DP bits in common between all three register formats */
834 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 842
835 else 843 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
836 intel_dp->DP |= DP_LINK_TRAIN_OFF;
837 844
838 switch (intel_dp->lane_count) { 845 switch (intel_dp->lane_count) {
839 case 1: 846 case 1:
@@ -852,59 +859,106 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 		intel_write_eld(encoder, adjusted_mode);
 	}
-
 	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
 	intel_dp->link_configuration[0] = intel_dp->link_bw;
 	intel_dp->link_configuration[1] = intel_dp->lane_count;
 	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-
 	/*
 	 * Check for DPCD version > 1.1 and enhanced framing support
 	 */
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
 	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
 		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-		intel_dp->DP |= DP_ENHANCED_FRAMING;
 	}
 
-	/* CPT DP's pipe select is decided in TRANS_DP_CTL */
-	if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
-		intel_dp->DP |= DP_PIPEB_SELECT;
+	/* Split out the IBX/CPU vs CPT settings */
 
-	if (is_cpu_edp(intel_dp)) {
-		/* don't miss out required setting for eDP */
-		intel_dp->DP |= DP_PLL_ENABLE;
-		if (adjusted_mode->clock < 200000)
-			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
-		else
-			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+	if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+		intel_dp->DP |= intel_dp->color_range;
+
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			intel_dp->DP |= DP_SYNC_HS_HIGH;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			intel_dp->DP |= DP_SYNC_VS_HIGH;
+		intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+			intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+		if (intel_crtc->pipe == 1)
+			intel_dp->DP |= DP_PIPEB_SELECT;
+
+		if (is_cpu_edp(intel_dp)) {
+			/* don't miss out required setting for eDP */
+			intel_dp->DP |= DP_PLL_ENABLE;
+			if (adjusted_mode->clock < 200000)
+				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+			else
+				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+		}
+	} else {
+		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 	}
 }
 
-static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+#define IDLE_ON_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE		(PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_OFF_VALUE		(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+#define IDLE_CYCLE_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE	(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+				       u32 mask,
+				       u32 value)
 {
-	unsigned long off_time;
-	unsigned long delay;
+	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	DRM_DEBUG_KMS("Wait for panel power off time\n");
+	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+		      mask, value,
+		      I915_READ(PCH_PP_STATUS),
+		      I915_READ(PCH_PP_CONTROL));
 
-	if (ironlake_edp_have_panel_power(intel_dp) ||
-	    ironlake_edp_have_panel_vdd(intel_dp))
-	{
-		DRM_DEBUG_KMS("Panel still on, no delay needed\n");
-		return;
+	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
+		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+			  I915_READ(PCH_PP_STATUS),
+			  I915_READ(PCH_PP_CONTROL));
 	}
+}
 
-	off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay);
-	if (time_after(jiffies, off_time)) {
-		DRM_DEBUG_KMS("Time already passed");
-		return;
-	}
-	delay = jiffies_to_msecs(off_time - jiffies);
-	if (delay > intel_dp->panel_power_down_delay)
-		delay = intel_dp->panel_power_down_delay;
-	DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay);
-	msleep(delay);
+static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power on\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power off time\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power cycle\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
+{
+	u32 control = I915_READ(PCH_PP_CONTROL);
+
+	control &= ~PANEL_UNLOCK_MASK;
+	control |= PANEL_UNLOCK_REGS;
+	return control;
 }
 
 static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
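Note: the IDLE_* mask/value pairs and ironlake_wait_panel_status() above replace the old jiffies bookkeeping with a direct poll of the panel power sequencer: re-read PCH_PP_STATUS until (status & mask) == value, or give up after 5 seconds. A minimal standalone sketch of that poll pattern, assuming a generic register reader and a millisecond sleep in place of the driver's _wait_for() helper (all names here are illustrative, not the driver's):

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

/* poll_status() is a hypothetical stand-in for _wait_for(): keep
 * re-reading the status register until the masked bits match the
 * expected value, giving up after timeout_ms.
 */
static bool poll_status(uint32_t (*read_status)(void),
			uint32_t mask, uint32_t value,
			unsigned int timeout_ms, unsigned int step_ms)
{
	for (unsigned int waited = 0; waited < timeout_ms; waited += step_ms) {
		if ((read_status() & mask) == value)
			return true;		/* sequencer reached the state */
		usleep(step_ms * 1000);		/* the driver sleeps with msleep() */
	}
	return (read_status() & mask) == value;	/* final check, else timed out */
}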
@@ -921,15 +975,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
 	     "eDP VDD already requested on\n");
 
 	intel_dp->want_panel_vdd = true;
+
 	if (ironlake_edp_have_panel_vdd(intel_dp)) {
 		DRM_DEBUG_KMS("eDP VDD already on\n");
 		return;
 	}
 
-	ironlake_wait_panel_off(intel_dp);
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
+	if (!ironlake_edp_have_panel_power(intel_dp))
+		ironlake_wait_panel_power_cycle(intel_dp);
+
+	pp = ironlake_get_pp_control(dev_priv);
 	pp |= EDP_FORCE_VDD;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
@@ -952,9 +1007,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	u32 pp;
 
 	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
-		pp = I915_READ(PCH_PP_CONTROL);
-		pp &= ~PANEL_UNLOCK_MASK;
-		pp |= PANEL_UNLOCK_REGS;
+		pp = ironlake_get_pp_control(dev_priv);
 		pp &= ~EDP_FORCE_VDD;
 		I915_WRITE(PCH_PP_CONTROL, pp);
 		POSTING_READ(PCH_PP_CONTROL);
@@ -962,7 +1015,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
 		/* Make sure sequencer is idle before allowing subsequent activity */
 		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
 			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
-		intel_dp->panel_off_jiffies = jiffies;
+
+		msleep(intel_dp->panel_power_down_delay);
 	}
 }
 
@@ -972,9 +1026,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
 						 struct intel_dp, panel_vdd_work);
 	struct drm_device *dev = intel_dp->base.base.dev;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->mode_config.mutex);
 	ironlake_panel_vdd_off_sync(intel_dp);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 
 static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -984,7 +1038,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 
 	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
 	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
-
+
 	intel_dp->want_panel_vdd = false;
 
 	if (sync) {
@@ -1000,23 +1054,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 	}
 }
 
-/* Returns true if the panel was already on when called */
 static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
+	u32 pp;
 
 	if (!is_edp(intel_dp))
 		return;
-	if (ironlake_edp_have_panel_power(intel_dp))
+
+	DRM_DEBUG_KMS("Turn eDP power on\n");
+
+	if (ironlake_edp_have_panel_power(intel_dp)) {
+		DRM_DEBUG_KMS("eDP power already on\n");
 		return;
+	}
 
-	ironlake_wait_panel_off(intel_dp);
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
+	ironlake_wait_panel_power_cycle(intel_dp);
 
+	pp = ironlake_get_pp_control(dev_priv);
 	if (IS_GEN5(dev)) {
 		/* ILK workaround: disable reset around power sequence */
 		pp &= ~PANEL_POWER_RESET;
@@ -1025,13 +1081,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 	}
 
 	pp |= POWER_TARGET_ON;
+	if (!IS_GEN5(dev))
+		pp |= PANEL_POWER_RESET;
+
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
 
-	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
-		     5000))
-		DRM_ERROR("panel on wait timed out: 0x%08x\n",
-			  I915_READ(PCH_PP_STATUS));
+	ironlake_wait_panel_on(intel_dp);
 
 	if (IS_GEN5(dev)) {
 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1040,46 +1096,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 	}
 }
 
-static void ironlake_edp_panel_off(struct drm_encoder *encoder)
+static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = intel_dp->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
-		PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
+	u32 pp;
 
 	if (!is_edp(intel_dp))
 		return;
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
 
-	if (IS_GEN5(dev)) {
-		/* ILK workaround: disable reset around power sequence */
-		pp &= ~PANEL_POWER_RESET;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-	}
+	DRM_DEBUG_KMS("Turn eDP power off\n");
 
-	intel_dp->panel_off_jiffies = jiffies;
+	WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
 
-	if (IS_GEN5(dev)) {
-		pp &= ~POWER_TARGET_ON;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-		pp &= ~POWER_TARGET_ON;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-		msleep(intel_dp->panel_power_cycle_delay);
-
-		if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
-			DRM_ERROR("panel off wait timed out: 0x%08x\n",
-				  I915_READ(PCH_PP_STATUS));
+	pp = ironlake_get_pp_control(dev_priv);
+	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+	I915_WRITE(PCH_PP_CONTROL, pp);
+	POSTING_READ(PCH_PP_CONTROL);
 
-		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-	}
+	ironlake_wait_panel_off(intel_dp);
 }
 
 static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1099,9 +1134,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
 	 * allowing it to appear.
 	 */
 	msleep(intel_dp->backlight_on_delay);
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
+	pp = ironlake_get_pp_control(dev_priv);
 	pp |= EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
@@ -1117,9 +1150,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 		return;
 
 	DRM_DEBUG_KMS("\n");
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
+	pp = ironlake_get_pp_control(dev_priv);
 	pp &= ~EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
@@ -1187,17 +1218,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+	ironlake_edp_backlight_off(intel_dp);
+	ironlake_edp_panel_off(intel_dp);
+
 	/* Wake up the sink first */
 	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_link_down(intel_dp);
 	ironlake_edp_panel_vdd_off(intel_dp, false);
 
 	/* Make sure the panel is off before trying to
 	 * change the mode
 	 */
-	ironlake_edp_backlight_off(intel_dp);
-	intel_dp_link_down(intel_dp);
-	ironlake_edp_panel_off(encoder);
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1211,7 +1243,6 @@ static void intel_dp_commit(struct drm_encoder *encoder)
 	intel_dp_start_link_train(intel_dp);
 	ironlake_edp_panel_on(intel_dp);
 	ironlake_edp_panel_vdd_off(intel_dp, true);
-
 	intel_dp_complete_link_train(intel_dp);
 	ironlake_edp_backlight_on(intel_dp);
 
@@ -1230,16 +1261,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
 	if (mode != DRM_MODE_DPMS_ON) {
+		ironlake_edp_backlight_off(intel_dp);
+		ironlake_edp_panel_off(intel_dp);
+
 		ironlake_edp_panel_vdd_on(intel_dp);
-		if (is_edp(intel_dp))
-			ironlake_edp_backlight_off(intel_dp);
 		intel_dp_sink_dpms(intel_dp, mode);
 		intel_dp_link_down(intel_dp);
-		ironlake_edp_panel_off(encoder);
-		if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
-			ironlake_edp_pll_off(encoder);
 		ironlake_edp_panel_vdd_off(intel_dp, false);
+
+		if (is_cpu_edp(intel_dp))
+			ironlake_edp_pll_off(encoder);
 	} else {
+		if (is_cpu_edp(intel_dp))
+			ironlake_edp_pll_on(encoder);
+
 		ironlake_edp_panel_vdd_on(intel_dp);
 		intel_dp_sink_dpms(intel_dp, mode);
 		if (!(dp_reg & DP_PORT_EN)) {
@@ -1247,7 +1282,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 			ironlake_edp_panel_on(intel_dp);
 			ironlake_edp_panel_vdd_off(intel_dp, true);
 			intel_dp_complete_link_train(intel_dp);
-			ironlake_edp_backlight_on(intel_dp);
 		} else
 			ironlake_edp_panel_vdd_off(intel_dp, false);
 		ironlake_edp_backlight_on(intel_dp);
@@ -1285,11 +1319,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
  * link status information
  */
 static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp)
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	return intel_dp_aux_native_read_retry(intel_dp,
 					      DP_LANE0_1_STATUS,
-					      intel_dp->link_status,
+					      link_status,
 					      DP_LINK_STATUS_SIZE);
 }
 
@@ -1301,27 +1335,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 }
 
 static uint8_t
-intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_voltage(uint8_t adjust_request[2],
 				 int lane)
 {
-	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
 	int s = ((lane & 1) ?
 		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
 		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
-	uint8_t l = intel_dp_link_status(link_status, i);
+	uint8_t l = adjust_request[lane>>1];
 
 	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
 }
 
 static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
 				      int lane)
 {
-	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
 	int s = ((lane & 1) ?
 		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
 		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
-	uint8_t l = intel_dp_link_status(link_status, i);
+	uint8_t l = adjust_request[lane>>1];
 
 	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
 }
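Note: both helpers above now index the raw DPCD bytes directly. Each ADJUST_REQUEST byte carries two lanes: voltage swing in bits 1:0 (even lane) and 5:4 (odd lane), pre-emphasis in the adjacent bit pairs. A standalone sketch of the voltage decode, with the shift values written as literals rather than the driver's DP_ADJUST_* defines:

#include <stdint.h>
#include <stdio.h>

static uint8_t adjust_voltage(const uint8_t adjust_request[2], int lane)
{
	int s = (lane & 1) ? 4 : 0;	/* odd lanes use the high nibble */
	return (adjust_request[lane >> 1] >> s) & 3;
}

int main(void)
{
	/* lane0=swing 2, lane1=swing 1; lane2=swing 3, lane3=swing 0 */
	uint8_t req[2] = { 0x12, 0x03 };

	for (int lane = 0; lane < 4; lane++)
		printf("lane %d: vswing level %u\n", lane, adjust_voltage(req, lane));
	return 0;
}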
@@ -1344,6 +1376,7 @@ static char *link_train_names[] = {
  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
  */
 #define I830_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_800
+#define I830_DP_VOLTAGE_MAX_CPT	    DP_TRAIN_VOLTAGE_SWING_1200
 
 static uint8_t
 intel_dp_pre_emphasis_max(uint8_t voltage_swing)
@@ -1362,15 +1395,18 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
 }
 
 static void
-intel_get_adjust_train(struct intel_dp *intel_dp)
+intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
+	struct drm_device *dev = intel_dp->base.base.dev;
 	uint8_t v = 0;
 	uint8_t p = 0;
 	int lane;
+	uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
+	int voltage_max;
 
 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-		uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
-		uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
+		uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
 
 		if (this_v > v)
 			v = this_v;
@@ -1378,8 +1414,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
 			p = this_p;
 	}
 
-	if (v >= I830_DP_VOLTAGE_MAX)
-		v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		voltage_max = I830_DP_VOLTAGE_MAX_CPT;
+	else
+		voltage_max = I830_DP_VOLTAGE_MAX;
+	if (v >= voltage_max)
+		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
 	if (p >= intel_dp_pre_emphasis_max(v))
 		p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
@@ -1389,7 +1429,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
 }
 
 static uint32_t
-intel_dp_signal_levels(uint8_t train_set, int lane_count)
+intel_dp_signal_levels(uint8_t train_set)
 {
 	uint32_t signal_levels = 0;
 
@@ -1458,9 +1498,8 @@ static uint8_t
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 		      int lane)
 {
-	int i = DP_LANE0_1_STATUS + (lane >> 1);
 	int s = (lane & 1) * 4;
-	uint8_t l = intel_dp_link_status(link_status, i);
+	uint8_t l = link_status[lane>>1];
 
 	return (l >> s) & 0xf;
 }
@@ -1485,18 +1524,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
 			 DP_LANE_CHANNEL_EQ_DONE|\
 			 DP_LANE_SYMBOL_LOCKED)
 static bool
-intel_channel_eq_ok(struct intel_dp *intel_dp)
+intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	uint8_t lane_align;
 	uint8_t lane_status;
 	int lane;
 
-	lane_align = intel_dp_link_status(intel_dp->link_status,
+	lane_align = intel_dp_link_status(link_status,
 					  DP_LANE_ALIGN_STATUS_UPDATED);
 	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
 		return false;
 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-		lane_status = intel_get_lane_status(intel_dp->link_status, lane);
+		lane_status = intel_get_lane_status(link_status, lane);
 		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
 			return false;
 	}
@@ -1521,8 +1560,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
 
 	ret = intel_dp_aux_native_write(intel_dp,
 					DP_TRAINING_LANE0_SET,
-					intel_dp->train_set, 4);
-	if (ret != 4)
+					intel_dp->train_set,
+					intel_dp->lane_count);
+	if (ret != intel_dp->lane_count)
 		return false;
 
 	return true;
@@ -1538,7 +1578,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 	int i;
 	uint8_t voltage;
 	bool clock_recovery = false;
-	int tries;
+	int voltage_tries, loop_tries;
 	u32 reg;
 	uint32_t DP = intel_dp->DP;
 
@@ -1565,16 +1605,20 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 	DP &= ~DP_LINK_TRAIN_MASK;
 	memset(intel_dp->train_set, 0, 4);
 	voltage = 0xff;
-	tries = 0;
+	voltage_tries = 0;
+	loop_tries = 0;
 	clock_recovery = false;
 	for (;;) {
 		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
 		uint32_t signal_levels;
-		if (IS_GEN6(dev) && is_edp(intel_dp)) {
+
+		if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
-			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
+			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
+			DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
@@ -1590,10 +1634,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 		/* Set training pattern 1 */
 
 		udelay(100);
-		if (!intel_dp_get_link_status(intel_dp))
+		if (!intel_dp_get_link_status(intel_dp, link_status)) {
+			DRM_ERROR("failed to get link status\n");
 			break;
+		}
 
-		if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
+			DRM_DEBUG_KMS("clock recovery OK\n");
 			clock_recovery = true;
 			break;
 		}
@@ -1602,20 +1649,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
 		for (i = 0; i < intel_dp->lane_count; i++)
 			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
 				break;
-		if (i == intel_dp->lane_count)
-			break;
+		if (i == intel_dp->lane_count) {
+			++loop_tries;
+			if (loop_tries == 5) {
+				DRM_DEBUG_KMS("too many full retries, give up\n");
+				break;
+			}
+			memset(intel_dp->train_set, 0, 4);
+			voltage_tries = 0;
+			continue;
+		}
 
 		/* Check to see if we've tried the same voltage 5 times */
 		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
-			++tries;
-			if (tries == 5)
+			++voltage_tries;
+			if (voltage_tries == 5) {
+				DRM_DEBUG_KMS("too many voltage retries, give up\n");
 				break;
+			}
 		} else
-			tries = 0;
+			voltage_tries = 0;
 		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
 
 		/* Compute new intel_dp->train_set as requested by target */
-		intel_get_adjust_train(intel_dp);
+		intel_get_adjust_train(intel_dp, link_status);
 	}
 
 	intel_dp->DP = DP;
@@ -1638,6 +1695,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 	for (;;) {
 		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
 		uint32_t signal_levels;
+		uint8_t link_status[DP_LINK_STATUS_SIZE];
 
 		if (cr_tries > 5) {
 			DRM_ERROR("failed to train DP, aborting\n");
@@ -1645,11 +1703,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 			break;
 		}
 
-		if (IS_GEN6(dev) && is_edp(intel_dp)) {
+		if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
 			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
 		} else {
-			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
+			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
 			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
 		}
 
@@ -1665,17 +1723,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 			break;
 
 		udelay(400);
-		if (!intel_dp_get_link_status(intel_dp))
+		if (!intel_dp_get_link_status(intel_dp, link_status))
 			break;
 
 		/* Make sure clock is still ok */
-		if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+		if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
 			intel_dp_start_link_train(intel_dp);
 			cr_tries++;
 			continue;
 		}
 
-		if (intel_channel_eq_ok(intel_dp)) {
+		if (intel_channel_eq_ok(intel_dp, link_status)) {
 			channel_eq = true;
 			break;
 		}
@@ -1690,7 +1748,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
 		}
 
 		/* Compute new intel_dp->train_set as requested by target */
-		intel_get_adjust_train(intel_dp);
+		intel_get_adjust_train(intel_dp, link_status);
 		++tries;
 	}
 
@@ -1735,8 +1793,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
 
 	msleep(17);
 
-	if (is_edp(intel_dp))
-		DP |= DP_LINK_TRAIN_OFF;
+	if (is_edp(intel_dp)) {
+		if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+			DP |= DP_LINK_TRAIN_OFF_CPT;
+		else
+			DP |= DP_LINK_TRAIN_OFF;
+	}
 
 	if (!HAS_PCH_CPT(dev) &&
 	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
@@ -1822,6 +1884,7 @@ static void
 intel_dp_check_link_status(struct intel_dp *intel_dp)
 {
 	u8 sink_irq_vector;
+	u8 link_status[DP_LINK_STATUS_SIZE];
 
 	if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
 		return;
@@ -1830,7 +1893,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 		return;
 
 	/* Try to read receiver status if the link appears to be up */
-	if (!intel_dp_get_link_status(intel_dp)) {
+	if (!intel_dp_get_link_status(intel_dp, link_status)) {
 		intel_dp_link_down(intel_dp);
 		return;
 	}
@@ -1855,7 +1918,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
 			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
 	}
 
-	if (!intel_channel_eq_ok(intel_dp)) {
+	if (!intel_channel_eq_ok(intel_dp, link_status)) {
 		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
 			      drm_get_encoder_name(&intel_dp->base.base));
 		intel_dp_start_link_train(intel_dp);
@@ -2179,7 +2242,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
 			continue;
 
 		intel_dp = enc_to_intel_dp(encoder);
-		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT)
+		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_dp->base.type == INTEL_OUTPUT_EDP)
 			return intel_dp->output_reg;
 	}
 
@@ -2321,7 +2385,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 
 	cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
 		PANEL_LIGHT_ON_DELAY_SHIFT;
-
+
 	cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
 		PANEL_LIGHT_OFF_DELAY_SHIFT;
 
@@ -2354,11 +2418,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 		DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
 			      intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
 
-		intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
-
 		ironlake_edp_panel_vdd_on(intel_dp);
 		ret = intel_dp_get_dpcd(intel_dp);
 		ironlake_edp_panel_vdd_off(intel_dp, false);
+
 		if (ret) {
 			if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
 				dev_priv->no_aux_handshake =
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 499d4c0dbeeb..21f60b7d69a3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -326,7 +326,8 @@ static int intel_panel_update_status(struct backlight_device *bd)
 static int intel_panel_get_brightness(struct backlight_device *bd)
 {
 	struct drm_device *dev = bl_get_data(bd);
-	return intel_panel_get_backlight(dev);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->backlight_level;
 }
 
 static const struct backlight_ops intel_panel_bl_ops = {
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 032a82098136..5fc201b49d30 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -640,10 +640,9 @@ static int
 nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t reg0 = nv_rd32(dev, reg + 0);
-	uint32_t reg1 = nv_rd32(dev, reg + 4);
 	struct nouveau_pll_vals pll;
 	struct pll_lims pll_limits;
+	u32 ctrl, mask, coef;
 	int ret;
 
 	ret = get_pll_limits(dev, reg, &pll_limits);
@@ -654,15 +653,20 @@ nv50_pll_set(struct drm_device *dev, uint32_t reg, uint32_t clk)
 	if (!clk)
 		return -ERANGE;
 
-	reg0 = (reg0 & 0xfff8ffff) | (pll.log2P << 16);
-	reg1 = (reg1 & 0xffff0000) | (pll.N1 << 8) | pll.M1;
-
-	if (dev_priv->vbios.execute) {
-		still_alive();
-		nv_wr32(dev, reg + 4, reg1);
-		nv_wr32(dev, reg + 0, reg0);
+	coef = pll.N1 << 8 | pll.M1;
+	ctrl = pll.log2P << 16;
+	mask = 0x00070000;
+	if (reg == 0x004008) {
+		mask |= 0x01f80000;
+		ctrl |= (pll_limits.log2p_bias << 19);
+		ctrl |= (pll.log2P << 22);
 	}
 
+	if (!dev_priv->vbios.execute)
+		return 0;
+
+	nv_mask(dev, reg + 0, mask, ctrl);
+	nv_wr32(dev, reg + 4, coef);
 	return 0;
 }
 
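Note: nv50_pll_set() now writes the coefficient word (N1 in bits 15:8, M1 in bits 7:0) separately from a masked control update (log2P in bits 18:16, with the 0x004008 PLL additionally taking a biased copy in the upper bits). A worked packing example with made-up divider values, following the field layout in the diff:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t N1 = 0x55, M1 = 0x0e, log2P = 2, log2p_bias = 1;
	int is_reg_4008 = 1;			/* the special-cased PLL */

	uint32_t coef = N1 << 8 | M1;		/* 0x0000550e */
	uint32_t ctrl = log2P << 16;		/* 0x00020000 */
	uint32_t mask = 0x00070000;

	if (is_reg_4008) {
		mask |= 0x01f80000;
		ctrl |= log2p_bias << 19;	/* adds 0x00080000 */
		ctrl |= log2P << 22;		/* adds 0x00800000 */
	}

	printf("coef=%08x ctrl=%08x mask=%08x\n", coef, ctrl, mask);
	return 0;
}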
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7226f419e178..7cc37e690860 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -148,7 +148,7 @@ set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
 
 	if (dev_priv->card_type == NV_10 &&
 	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
-	    nvbo->bo.mem.num_pages < vram_pages / 2) {
+	    nvbo->bo.mem.num_pages < vram_pages / 4) {
 		/*
 		 * Make sure that the color and depth buffers are handled
 		 * by independent memory controller units. Up to a 9x
diff --git a/drivers/gpu/drm/nouveau/nouveau_channel.c b/drivers/gpu/drm/nouveau/nouveau_channel.c
index a319d5646ea9..bb6ec9ef8676 100644
--- a/drivers/gpu/drm/nouveau/nouveau_channel.c
+++ b/drivers/gpu/drm/nouveau/nouveau_channel.c
@@ -158,6 +158,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	INIT_LIST_HEAD(&chan->nvsw.vbl_wait);
 	INIT_LIST_HEAD(&chan->nvsw.flip);
 	INIT_LIST_HEAD(&chan->fence.pending);
+	spin_lock_init(&chan->fence.lock);
 
 	/* setup channel's memory and vm */
 	ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index e0d275e1c96c..cea6696b1906 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -710,7 +710,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
 	case OUTPUT_DP:
 		max_clock = nv_encoder->dp.link_nr;
 		max_clock *= nv_encoder->dp.link_bw;
-		clock = clock * nouveau_connector_bpp(connector) / 8;
+		clock = clock * nouveau_connector_bpp(connector) / 10;
 		break;
 	default:
 		BUG_ON(1);
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index ddbabefb4273..b12fd2c80812 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -369,3 +369,48 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
 	spin_unlock_irqrestore(&dev->event_lock, flags);
 	return 0;
 }
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+			    struct drm_mode_create_dumb *args)
+{
+	struct nouveau_bo *bo;
+	int ret;
+
+	args->pitch = roundup(args->width * (args->bpp / 8), 256);
+	args->size = args->pitch * args->height;
+	args->size = roundup(args->size, PAGE_SIZE);
+
+	ret = nouveau_gem_new(dev, args->size, 0, TTM_PL_FLAG_VRAM, 0, 0, &bo);
+	if (ret)
+		return ret;
+
+	ret = drm_gem_handle_create(file_priv, bo->gem, &args->handle);
+	drm_gem_object_unreference_unlocked(bo->gem);
+	return ret;
+}
+
+int
+nouveau_display_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
+			     uint32_t handle)
+{
+	return drm_gem_handle_delete(file_priv, handle);
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+				struct drm_device *dev,
+				uint32_t handle, uint64_t *poffset)
+{
+	struct drm_gem_object *gem;
+
+	gem = drm_gem_object_lookup(dev, file_priv, handle);
+	if (gem) {
+		struct nouveau_bo *bo = gem->driver_private;
+		*poffset = bo->bo.addr_space_offset;
+		drm_gem_object_unreference_unlocked(gem);
+		return 0;
+	}
+
+	return -ENOENT;
+}
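Note: the new dumb_create() hook rounds the scanline pitch up to a 256-byte multiple and the total size up to a whole page. A standalone check of that arithmetic; ROUNDUP here mirrors the kernel's roundup() macro for this use:

#include <stdint.h>
#include <stdio.h>

#define ROUNDUP(x, y) ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	uint32_t width = 1000, height = 768, bpp = 32, page = 4096;

	uint32_t pitch = ROUNDUP(width * (bpp / 8), 256);	/* 4000 -> 4096 */
	uint64_t size  = ROUNDUP((uint64_t)pitch * height, page); /* 3145728, already page-aligned */

	printf("pitch=%u size=%llu\n", pitch, (unsigned long long)size);
	return 0;
}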
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 9f7bb1295262..9791d13c9e3b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -433,6 +433,10 @@ static struct drm_driver driver = {
 	.gem_open_object = nouveau_gem_object_open,
 	.gem_close_object = nouveau_gem_object_close,
 
+	.dumb_create = nouveau_display_dumb_create,
+	.dumb_map_offset = nouveau_display_dumb_map_offset,
+	.dumb_destroy = nouveau_display_dumb_destroy,
+
 	.name = DRIVER_NAME,
 	.desc = DRIVER_DESC,
 #ifdef GIT_REVISION
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 29837da1098b..4c0be3a4ed88 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -1418,6 +1418,12 @@ int nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
 			       struct drm_pending_vblank_event *event);
 int nouveau_finish_page_flip(struct nouveau_channel *,
 			     struct nouveau_page_flip_state *);
+int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
+				struct drm_mode_create_dumb *args);
+int nouveau_display_dumb_map_offset(struct drm_file *, struct drm_device *,
+				    uint32_t handle, uint64_t *offset);
+int nouveau_display_dumb_destroy(struct drm_file *, struct drm_device *,
+				 uint32_t handle);
 
 /* nv10_gpio.c */
 int nv10_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 14a8627efe4d..3a4cc32b9e44 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -487,6 +487,7 @@ int nouveau_fbcon_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fbdev *nfbdev;
+	int preferred_bpp;
 	int ret;
 
 	nfbdev = kzalloc(sizeof(struct nouveau_fbdev), GFP_KERNEL);
@@ -505,7 +506,15 @@ int nouveau_fbcon_init(struct drm_device *dev)
 	}
 
 	drm_fb_helper_single_add_all_connectors(&nfbdev->helper);
-	drm_fb_helper_initial_config(&nfbdev->helper, 32);
+
+	if (dev_priv->vram_size <= 32 * 1024 * 1024)
+		preferred_bpp = 8;
+	else if (dev_priv->vram_size <= 64 * 1024 * 1024)
+		preferred_bpp = 16;
+	else
+		preferred_bpp = 32;
+
+	drm_fb_helper_initial_config(&nfbdev->helper, preferred_bpp);
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 81116cfea275..2f6daae68b9d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -539,8 +539,6 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 		return ret;
 	}
 
-	INIT_LIST_HEAD(&chan->fence.pending);
-	spin_lock_init(&chan->fence.lock);
 	atomic_set(&chan->fence.last_sequence_irq, 0);
 	return 0;
 }
diff --git a/drivers/gpu/drm/nouveau/nouveau_i2c.c b/drivers/gpu/drm/nouveau/nouveau_i2c.c
index c6143df48b9f..d39b2202b197 100644
--- a/drivers/gpu/drm/nouveau/nouveau_i2c.c
+++ b/drivers/gpu/drm/nouveau/nouveau_i2c.c
@@ -333,7 +333,7 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
 
 	NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
 
-	for (i = 0; info[i].addr; i++) {
+	for (i = 0; i2c && info[i].addr; i++) {
 		if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
 		    (!match || match(i2c, &info[i]))) {
 			NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
diff --git a/drivers/gpu/drm/nouveau/nouveau_object.c b/drivers/gpu/drm/nouveau/nouveau_object.c
index 02222c540aee..960c0ae0c0c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_object.c
+++ b/drivers/gpu/drm/nouveau/nouveau_object.c
@@ -680,7 +680,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 		return ret;
 	}
 
-	ret = drm_mm_init(&chan->ramin_heap, base, size);
+	ret = drm_mm_init(&chan->ramin_heap, base, size - base);
 	if (ret) {
 		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
 		nouveau_gpuobj_ref(NULL, &chan->ramin);
diff --git a/drivers/gpu/drm/nouveau/nouveau_perf.c b/drivers/gpu/drm/nouveau/nouveau_perf.c
index 9f178aa94162..33d03fbf00df 100644
--- a/drivers/gpu/drm/nouveau/nouveau_perf.c
+++ b/drivers/gpu/drm/nouveau/nouveau_perf.c
@@ -239,7 +239,7 @@ nouveau_perf_init(struct drm_device *dev)
 	if(version == 0x15) {
 		memtimings->timing =
 				kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
-		if(!memtimings) {
+		if (!memtimings->timing) {
 			NV_WARN(dev,"Could not allocate memtiming table\n");
 			return;
 		}
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index b75258a9fe44..c8a463b76c89 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -67,7 +67,10 @@ nouveau_sgdma_clear(struct ttm_backend *be)
 			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
+		nvbe->unmap_pages = false;
 	}
+
+	nvbe->pages = NULL;
 }
 
 static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index 82478e0998e5..d8831ab42bb9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -579,6 +579,14 @@ nouveau_card_init(struct drm_device *dev)
 	if (ret)
 		goto out_display_early;
 
+	/* workaround an odd issue on nvc1 by disabling the device's
+	 * nosnoop capability. hopefully won't cause issues until a
+	 * better fix is found - assuming there is one...
+	 */
+	if (dev_priv->chipset == 0xc1) {
+		nv_mask(dev, 0x00088080, 0x00000800, 0x00000000);
+	}
+
 	nouveau_pm_init(dev);
 
 	ret = engine->vram.init(dev);
@@ -1102,12 +1110,13 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->noaccel = !!nouveau_noaccel;
 	if (nouveau_noaccel == -1) {
 		switch (dev_priv->chipset) {
-		case 0xc1: /* known broken */
-		case 0xc8: /* never tested */
+#if 0
+		case 0xXX: /* known broken */
 			NV_INFO(dev, "acceleration disabled by default, pass "
 				     "noaccel=0 to force enable\n");
 			dev_priv->noaccel = true;
 			break;
+#endif
 		default:
 			dev_priv->noaccel = false;
 			break;
diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c
index bbc0b9c7e1f7..e676b0d53478 100644
--- a/drivers/gpu/drm/nouveau/nv40_pm.c
+++ b/drivers/gpu/drm/nouveau/nv40_pm.c
@@ -57,12 +57,14 @@ read_pll_2(struct drm_device *dev, u32 reg)
 	int P = (ctrl & 0x00070000) >> 16;
 	u32 ref = 27000, clk = 0;
 
-	if (ctrl & 0x80000000)
+	if ((ctrl & 0x80000000) && M1) {
 		clk = ref * N1 / M1;
-
-	if (!(ctrl & 0x00000100)) {
-		if (ctrl & 0x40000000)
-			clk = clk * N2 / M2;
+		if ((ctrl & 0x40000100) == 0x40000000) {
+			if (M2)
+				clk = clk * N2 / M2;
+			else
+				clk = 0;
+		}
 	}
 
 	return clk >> P;
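Note: read_pll_2() now guards both divides: a zero M1 skips the PLL entirely, and a zero M2 in two-stage mode yields 0 rather than a division fault. A quick arithmetic check of the readback formula clk = (ref * N1 / M1) * N2 / M2 >> P, with made-up dividers (reference clock 27 MHz, expressed in kHz as in the driver):

#include <stdio.h>

int main(void)
{
	unsigned ref = 27000, N1 = 100, M1 = 9, N2 = 20, M2 = 5, P = 1;

	unsigned clk = ref * N1 / M1;	/* 300000 kHz after stage 1 */
	clk = clk * N2 / M2;		/* 1200000 kHz after stage 2 */
	clk >>= P;			/* 600000 kHz = 600 MHz out */

	printf("%u kHz\n", clk);
	return 0;
}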
@@ -177,6 +179,11 @@ nv40_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	}
 
 	/* memory clock */
+	if (!perflvl->memory) {
+		info->mpll_ctrl = 0x00000000;
+		goto out;
+	}
+
 	ret = nv40_calc_pll(dev, 0x004020, &pll, perflvl->memory,
 			    &N1, &M1, &N2, &M2, &log2P);
 	if (ret < 0)
@@ -264,6 +271,9 @@ nv40_pm_clocks_set(struct drm_device *dev, void *pre_state)
 	mdelay(5);
 	nv_mask(dev, 0x00c040, 0x00000333, info->ctrl);
 
+	if (!info->mpll_ctrl)
+		goto resume;
+
 	/* wait for vblank start on active crtcs, disable memory access */
 	for (i = 0; i < 2; i++) {
 		if (!(crtc_mask & (1 << i)))
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index d23ca00e7d62..06de250fe617 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -616,7 +616,7 @@ nv50_display_unk10_handler(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv50_display *disp = nv50_display(dev);
 	u32 unk30 = nv_rd32(dev, 0x610030), mc;
-	int i, crtc, or, type = OUTPUT_ANY;
+	int i, crtc, or = 0, type = OUTPUT_ANY;
 
 	NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
 	disp->irq.dcb = NULL;
@@ -708,7 +708,7 @@ nv50_display_unk20_handler(struct drm_device *dev)
 	struct nv50_display *disp = nv50_display(dev);
 	u32 unk30 = nv_rd32(dev, 0x610030), tmp, pclk, script, mc = 0;
 	struct dcb_entry *dcb;
-	int i, crtc, or, type = OUTPUT_ANY;
+	int i, crtc, or = 0, type = OUTPUT_ANY;
 
 	NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
 	dcb = disp->irq.dcb;
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 8c979b31ff61..ac601f7c4e1a 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -131,8 +131,8 @@ nv50_graph_init(struct drm_device *dev, int engine)
 	NV_DEBUG(dev, "\n");
 
 	/* master reset */
-	nv_mask(dev, 0x000200, 0x00200100, 0x00000000);
-	nv_mask(dev, 0x000200, 0x00200100, 0x00200100);
+	nv_mask(dev, 0x000200, 0x00201000, 0x00000000);
+	nv_mask(dev, 0x000200, 0x00201000, 0x00201000);
 	nv_wr32(dev, 0x40008c, 0x00000004); /* HW_CTX_SWITCH_ENABLED */
 
 	/* reset/enable traps and interrupts */
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index d05c2c3b2444..4b46d6968566 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -601,7 +601,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
 		gr_def(ctx, offset + 0x1c, 0x00880000);
 		break;
 	case 0x86:
-		gr_def(ctx, offset + 0x1c, 0x008c0000);
+		gr_def(ctx, offset + 0x1c, 0x018c0000);
 		break;
 	case 0x92:
 	case 0x96:
diff --git a/drivers/gpu/drm/nouveau/nv50_vram.c b/drivers/gpu/drm/nouveau/nv50_vram.c
index 9da23838e63e..2e45e57fd869 100644
--- a/drivers/gpu/drm/nouveau/nv50_vram.c
+++ b/drivers/gpu/drm/nouveau/nv50_vram.c
@@ -160,7 +160,7 @@ nv50_vram_rblock(struct drm_device *dev)
 	colbits  = (r4 & 0x0000f000) >> 12;
 	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
 	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
-	banks    = ((r4 & 0x01000000) ? 8 : 4);
+	banks    = 1 << (((r4 & 0x03000000) >> 24) + 2);
 
 	rowsize = parts * banks * (1 << colbits) * 8;
 	predicted = rowsize << rowbitsa;
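Note: the bank count is now decoded from a two-bit field rather than a single flag: field value 0 gives 4 banks, 1 gives 8, 2 gives 16, 3 gives 32. A quick standalone check of the decode:

#include <stdio.h>

int main(void)
{
	for (unsigned field = 0; field < 4; field++) {
		unsigned r4 = field << 24;
		unsigned banks = 1 << (((r4 & 0x03000000) >> 24) + 2);
		printf("field %u -> %u banks\n", field, banks);
	}
	return 0;
}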
diff --git a/drivers/gpu/drm/nouveau/nvc0_graph.c b/drivers/gpu/drm/nouveau/nvc0_graph.c
index bbdbc51830c8..ecfafd70cf0e 100644
--- a/drivers/gpu/drm/nouveau/nvc0_graph.c
+++ b/drivers/gpu/drm/nouveau/nvc0_graph.c
@@ -157,8 +157,8 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
 	struct nvc0_graph_priv *priv = nv_engine(chan->dev, NVOBJ_ENGINE_GR);
 	struct nvc0_graph_chan *grch = chan->engctx[NVOBJ_ENGINE_GR];
 	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	int i = 0, gpc, tp, ret;
-	u32 magic;
 
 	ret = nouveau_gpuobj_new(dev, chan, 0x2000, 256, NVOBJ_FLAG_VM,
 				 &grch->unk408004);
@@ -207,14 +207,37 @@ nvc0_graph_create_context_mmio_list(struct nouveau_channel *chan)
 	nv_wo32(grch->mmio, i++ * 4, 0x0041880c);
 	nv_wo32(grch->mmio, i++ * 4, 0x80000018);
 
-	magic = 0x02180000;
-	nv_wo32(grch->mmio, i++ * 4, 0x00405830);
-	nv_wo32(grch->mmio, i++ * 4, magic);
-	for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-		for (tp = 0; tp < priv->tp_nr[gpc]; tp++, magic += 0x0324) {
-			u32 reg = 0x504520 + (gpc * 0x8000) + (tp * 0x0800);
-			nv_wo32(grch->mmio, i++ * 4, reg);
-			nv_wo32(grch->mmio, i++ * 4, magic);
+	if (dev_priv->chipset != 0xc1) {
+		u32 magic = 0x02180000;
+		nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+		nv_wo32(grch->mmio, i++ * 4, magic);
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+				u32 reg = TP_UNIT(gpc, tp, 0x520);
+				nv_wo32(grch->mmio, i++ * 4, reg);
+				nv_wo32(grch->mmio, i++ * 4, magic);
+				magic += 0x0324;
+			}
+		}
+	} else {
+		u32 magic = 0x02180000;
+		nv_wo32(grch->mmio, i++ * 4, 0x00405830);
+		nv_wo32(grch->mmio, i++ * 4, magic | 0x0000218);
+		nv_wo32(grch->mmio, i++ * 4, 0x004064c4);
+		nv_wo32(grch->mmio, i++ * 4, 0x0086ffff);
+		for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
+			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+				u32 reg = TP_UNIT(gpc, tp, 0x520);
+				nv_wo32(grch->mmio, i++ * 4, reg);
+				nv_wo32(grch->mmio, i++ * 4, (1 << 28) | magic);
+				magic += 0x0324;
+			}
+			for (tp = 0; tp < priv->tp_nr[gpc]; tp++) {
+				u32 reg = TP_UNIT(gpc, tp, 0x544);
+				nv_wo32(grch->mmio, i++ * 4, reg);
+				nv_wo32(grch->mmio, i++ * 4, magic);
+				magic += 0x0324;
+			}
 		}
 	}
 
@@ -358,6 +381,8 @@ nvc0_graph_init_gpc_0(struct drm_device *dev)
358 u8 tpnr[GPC_MAX]; 381 u8 tpnr[GPC_MAX];
359 int i, gpc, tpc; 382 int i, gpc, tpc;
360 383
384 nv_wr32(dev, TP_UNIT(0, 0, 0x5c), 1); /* affects TFB offset queries */
385
361 /* 386 /*
362 * TP ROP UNKVAL(magic_not_rop_nr) 387 * TP ROP UNKVAL(magic_not_rop_nr)
363 * 450: 4/0/0/0 2 3 388 * 450: 4/0/0/0 2 3
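
Two things happen in the nvc0_graph mmio-list hunk: the hand-computed per-TP address 0x504520 + gpc*0x8000 + tp*0x0800 is replaced by the driver's TP_UNIT() helper, and chipset 0xc1 gets its own list that ORs bit 28 into the 0x520-series values and adds a second pass over the TPs for register 0x544. TP_UNIT is presumably defined along these lines (a sketch; nvc0_graph.h is authoritative):

    /* per-TP register address inside a GPC's 0x8000-byte window (sketch) */
    #define GPC_UNIT(gpc, r)    (0x500000 + (gpc) * 0x8000 + (r))
    #define TP_UNIT(gpc, tp, r) GPC_UNIT(gpc, 0x4000 + (tp) * 0x800 + (r))

    /* so TP_UNIT(gpc, tp, 0x520) == 0x504520 + gpc * 0x8000 + tp * 0x0800,
     * exactly the constant the old loop computed by hand */
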
diff --git a/drivers/gpu/drm/nouveau/nvc0_grctx.c b/drivers/gpu/drm/nouveau/nvc0_grctx.c
index dd0e6a736b3b..96b0b93d94ca 100644
--- a/drivers/gpu/drm/nouveau/nvc0_grctx.c
+++ b/drivers/gpu/drm/nouveau/nvc0_grctx.c
@@ -1812,6 +1812,7 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1812 /* calculate first set of magics */ 1812 /* calculate first set of magics */
1813 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1813 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1814 1814
1815 gpc = -1;
1815 for (tp = 0; tp < priv->tp_total; tp++) { 1816 for (tp = 0; tp < priv->tp_total; tp++) {
1816 do { 1817 do {
1817 gpc = (gpc + 1) % priv->gpc_nr; 1818 gpc = (gpc + 1) % priv->gpc_nr;
@@ -1861,30 +1862,26 @@ nvc0_grctx_generate(struct nouveau_channel *chan)
1861 1862
1862 if (1) { 1863 if (1) {
1863 u32 tp_mask = 0, tp_set = 0; 1864 u32 tp_mask = 0, tp_set = 0;
1864 u8 tpnr[GPC_MAX]; 1865 u8 tpnr[GPC_MAX], a, b;
1865 1866
1866 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr)); 1867 memcpy(tpnr, priv->tp_nr, sizeof(priv->tp_nr));
1867 for (gpc = 0; gpc < priv->gpc_nr; gpc++) 1868 for (gpc = 0; gpc < priv->gpc_nr; gpc++)
1868 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8); 1869 tp_mask |= ((1 << priv->tp_nr[gpc]) - 1) << (gpc * 8);
1869 1870
1870 gpc = -1; 1871 for (i = 0, gpc = -1, b = -1; i < 32; i++) {
1871 for (i = 0, gpc = -1; i < 32; i++) { 1872 a = (i * (priv->tp_total - 1)) / 32;
1872 int ltp = i * (priv->tp_total - 1) / 32; 1873 if (a != b) {
1873 1874 b = a;
1874 do { 1875 do {
1875 gpc = (gpc + 1) % priv->gpc_nr; 1876 gpc = (gpc + 1) % priv->gpc_nr;
1876 } while (!tpnr[gpc]); 1877 } while (!tpnr[gpc]);
1877 tp = priv->tp_nr[gpc] - tpnr[gpc]--; 1878 tp = priv->tp_nr[gpc] - tpnr[gpc]--;
1878 1879
1879 tp_set |= 1 << ((gpc * 8) + tp); 1880 tp_set |= 1 << ((gpc * 8) + tp);
1881 }
1880 1882
1881 do { 1883 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set);
1882 nv_wr32(dev, 0x406800 + (i * 0x20), tp_set); 1884 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set ^ tp_mask);
1883 tp_set ^= tp_mask;
1884 nv_wr32(dev, 0x406c00 + (i * 0x20), tp_set);
1885 tp_set ^= tp_mask;
1886 } while (ltp == (++i * (priv->tp_total - 1) / 32));
1887 i--;
1888 } 1885 }
1889 } 1886 }
1890 1887
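
The rewritten 0x406800/0x406c00 loop in nvc0_grctx computes the TP distribution far more directly: a = i * (tp_total - 1) / 32 steps exactly when the next TP should be folded into the mask, replacing the old inner do/while that re-derived the same condition and then had to rewind i; the second register write also becomes tp_set ^ tp_mask without mutating tp_set. A standalone sketch of the per-slot mask construction (the TP counts are assumed example values):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int tp_nr[4] = { 2, 2, 2, 2 };    /* assumed TPs per GPC */
        int tpnr[4], gpc_nr = 4, tp_total = 8;
        int i, gpc = -1, tp, a, b = -1;
        uint32_t tp_set = 0;

        for (i = 0; i < gpc_nr; i++)
            tpnr[i] = tp_nr[i];

        for (i = 0; i < 32; i++) {
            a = (i * (tp_total - 1)) / 32;
            if (a != b) {    /* time to fold in the next TP */
                b = a;
                do {
                    gpc = (gpc + 1) % gpc_nr;
                } while (!tpnr[gpc]);
                tp = tp_nr[gpc] - tpnr[gpc]--;
                tp_set |= 1u << ((gpc * 8) + tp);
            }
            printf("slot %2d: 0x%08x\n", i, tp_set);
        }
        return 0;
    }
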
diff --git a/drivers/gpu/drm/nouveau/nvc0_vram.c b/drivers/gpu/drm/nouveau/nvc0_vram.c
index edbfe9360ae2..ce984d573a51 100644
--- a/drivers/gpu/drm/nouveau/nvc0_vram.c
+++ b/drivers/gpu/drm/nouveau/nvc0_vram.c
@@ -43,7 +43,7 @@ static const u8 types[256] = {
43 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 43 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
44 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0, 44 0, 0, 0, 3, 3, 3, 3, 1, 1, 1, 1, 0, 0, 0, 0, 0,
45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 45 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3,
46 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3, 46 3, 3, 3, 1, 0, 0, 0, 0, 0, 0, 0, 0, 3, 3, 3, 3,
47 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3, 47 3, 3, 0, 0, 0, 0, 0, 0, 3, 0, 0, 3, 0, 3, 0, 3,
48 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0, 48 3, 0, 3, 3, 3, 3, 3, 0, 0, 3, 0, 3, 0, 3, 3, 0,
49 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0 49 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 1, 1, 0
@@ -110,22 +110,26 @@ nvc0_vram_init(struct drm_device *dev)
110 u32 bsize = nv_rd32(dev, 0x10f20c); 110 u32 bsize = nv_rd32(dev, 0x10f20c);
111 u32 offset, length; 111 u32 offset, length;
112 bool uniform = true; 112 bool uniform = true;
113 int ret, i; 113 int ret, part;
114 114
115 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800)); 115 NV_DEBUG(dev, "0x100800: 0x%08x\n", nv_rd32(dev, 0x100800));
116 NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize); 116 NV_DEBUG(dev, "parts 0x%08x bcast_mem_amount 0x%08x\n", parts, bsize);
117 117
118 /* read amount of vram attached to each memory controller */ 118 /* read amount of vram attached to each memory controller */
119 for (i = 0; i < parts; i++) { 119 part = 0;
120 u32 psize = nv_rd32(dev, 0x11020c + (i * 0x1000)); 120 while (parts) {
121 u32 psize = nv_rd32(dev, 0x11020c + (part++ * 0x1000));
122 if (psize == 0)
123 continue;
124 parts--;
125
121 if (psize != bsize) { 126 if (psize != bsize) {
122 if (psize < bsize) 127 if (psize < bsize)
123 bsize = psize; 128 bsize = psize;
124 uniform = false; 129 uniform = false;
125 } 130 }
126 131
127 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", i, psize); 132 NV_DEBUG(dev, "%d: mem_amount 0x%08x\n", part, psize);
128
129 dev_priv->vram_size += (u64)psize << 20; 133 dev_priv->vram_size += (u64)psize << 20;
130 } 134 }
131 135
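
The nvc0_vram partition loop now copes with boards where some memory controllers are fused off: such slots read back a size of 0 from 0x11020c + part*0x1000, so the rewritten loop walks slots until `parts` non-empty ones have been seen instead of assuming slots 0..parts-1 are all populated. A userspace sketch of the same skip-empty-slots walk (the sizes are assumed example data):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* assumed per-slot sizes in MiB; slot 1 is a fused-off controller */
        uint32_t psize_tab[] = { 1024, 0, 1024, 512 };
        int parts = 3;            /* controllers actually present */
        int part = 0;
        uint64_t vram_size = 0;

        while (parts) {
            uint32_t psize = psize_tab[part++];
            if (psize == 0)
                continue;        /* disabled partition, skip the slot */
            parts--;
            vram_size += (uint64_t)psize << 20;
        }
        printf("total vram: %llu MiB\n",
               (unsigned long long)(vram_size >> 20));    /* 2560 MiB */
        return 0;
    }
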
diff --git a/drivers/gpu/drm/nouveau/nvd0_display.c b/drivers/gpu/drm/nouveau/nvd0_display.c
index 23d63b4b3d77..cb006a718e70 100644
--- a/drivers/gpu/drm/nouveau/nvd0_display.c
+++ b/drivers/gpu/drm/nouveau/nvd0_display.c
@@ -780,7 +780,7 @@ nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
780 continue; 780 continue;
781 781
782 if (nv_partner != nv_encoder && 782 if (nv_partner != nv_encoder &&
783 nv_partner->dcb->or == nv_encoder->or) { 783 nv_partner->dcb->or == nv_encoder->dcb->or) {
784 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON) 784 if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
785 return; 785 return;
786 break; 786 break;
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 87921c88a95c..2b97262e3ab1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1107,9 +1107,40 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1107 return -EINVAL; 1107 return -EINVAL;
1108 } 1108 }
1109 1109
1110 if (tiling_flags & RADEON_TILING_MACRO) 1110 if (tiling_flags & RADEON_TILING_MACRO) {
1111 if (rdev->family >= CHIP_CAYMAN)
1112 tmp = rdev->config.cayman.tile_config;
1113 else
1114 tmp = rdev->config.evergreen.tile_config;
1115
1116 switch ((tmp & 0xf0) >> 4) {
1117 case 0: /* 4 banks */
1118 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
1119 break;
1120 case 1: /* 8 banks */
1121 default:
1122 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
1123 break;
1124 case 2: /* 16 banks */
1125 fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
1126 break;
1127 }
1128
1129 switch ((tmp & 0xf000) >> 12) {
1130 case 0: /* 1KB rows */
1131 default:
1132 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB);
1133 break;
1134 case 1: /* 2KB rows */
1135 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB);
1136 break;
1137 case 2: /* 4KB rows */
1138 fb_format |= EVERGREEN_GRPH_TILE_SPLIT(EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB);
1139 break;
1140 }
1141
1111 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1); 1142 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
1112 else if (tiling_flags & RADEON_TILING_MICRO) 1143 } else if (tiling_flags & RADEON_TILING_MICRO)
1113 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1144 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1114 1145
1115 switch (radeon_crtc->crtc_id) { 1146 switch (radeon_crtc->crtc_id) {
@@ -1522,12 +1553,6 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
1522 struct drm_display_mode *mode, 1553 struct drm_display_mode *mode,
1523 struct drm_display_mode *adjusted_mode) 1554 struct drm_display_mode *adjusted_mode)
1524{ 1555{
1525 struct drm_device *dev = crtc->dev;
1526 struct radeon_device *rdev = dev->dev_private;
1527
1528 /* adjust pm to upcoming mode change */
1529 radeon_pm_compute_clocks(rdev);
1530
1531 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode)) 1556 if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
1532 return false; 1557 return false;
1533 return true; 1558 return true;
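
For 2D-tiled scanout, dce4_crtc_do_set_base() now also programs the bank count and tile split into fb_format, decoding both from the GPU's tile_config word: bits 7:4 select 4/8/16 banks and bits 15:12 the 1/2/4 KB tile split, with out-of-range values falling back to the defaults. A worked decode under an assumed tile_config value:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t tile_config = 0x2010;    /* assumed: banks field 1, split field 2 */
        static const int banks_tab[] = { 4, 8, 16 };
        static const int split_kb[]  = { 1, 2, 4 };
        unsigned banks_field = (tile_config & 0xf0) >> 4;
        unsigned split_field = (tile_config & 0xf000) >> 12;

        /* unknown encodings fall back to the defaults (8 banks, 1KB split) */
        int banks = banks_field <= 2 ? banks_tab[banks_field] : 8;
        int split = split_field <= 2 ? split_kb[split_field] : 1;

        printf("%d banks, %dKB tile split\n", banks, split);    /* 8 banks, 4KB */
        return 0;
    }
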
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index a0de48542f71..6fb335a4fdda 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -283,7 +283,7 @@ int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
283 } 283 }
284 } 284 }
285 285
286 DRM_ERROR("aux i2c too many retries, giving up\n"); 286 DRM_DEBUG_KMS("aux i2c too many retries, giving up\n");
287 return -EREMOTEIO; 287 return -EREMOTEIO;
288} 288}
289 289
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e4c384b9511c..5e00d1670aa9 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -82,6 +82,7 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
82{ 82{
83 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 83 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
84 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset); 84 u32 tmp = RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset);
85 int i;
85 86
86 /* Lock the graphics update lock */ 87 /* Lock the graphics update lock */
87 tmp |= EVERGREEN_GRPH_UPDATE_LOCK; 88 tmp |= EVERGREEN_GRPH_UPDATE_LOCK;
@@ -99,7 +100,11 @@ u32 evergreen_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
99 (u32)crtc_base); 100 (u32)crtc_base);
100 101
101 /* Wait for update_pending to go high. */ 102 /* Wait for update_pending to go high. */
102 while (!(RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)); 103 for (i = 0; i < rdev->usec_timeout; i++) {
104 if (RREG32(EVERGREEN_GRPH_UPDATE + radeon_crtc->crtc_offset) & EVERGREEN_GRPH_SURFACE_UPDATE_PENDING)
105 break;
106 udelay(1);
107 }
103 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 108 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
104 109
105 /* Unlock the lock, so double-buffering can take place inside vblank */ 110 /* Unlock the lock, so double-buffering can take place inside vblank */
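
Both this hunk and the matching r100_page_flip change further down replace an unbounded while (!(RREG32(...) & PENDING)); spin: on a wedged GPU the bit may never rise and the CPU would loop forever. The fix bounds the poll at rdev->usec_timeout iterations of udelay(1) and falls through either way. The shape of the pattern as a runnable toy (fake_status and the 3us latch are stand-ins, not real hardware behavior):

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t fake_status;        /* stand-in for GRPH_UPDATE */
    static unsigned fake_elapsed;

    static void udelay(unsigned us)
    {
        fake_elapsed += us;
        if (fake_elapsed >= 3)          /* pretend the flip latches after 3us */
            fake_status |= 0x4;         /* SURFACE_UPDATE_PENDING stand-in */
    }

    /* bounded poll: worst case returns after timeout_us iterations, never hangs */
    static int wait_for_bit(uint32_t mask, unsigned timeout_us)
    {
        unsigned i;

        for (i = 0; i < timeout_us; i++) {
            if (fake_status & mask)
                return 0;
            udelay(1);
        }
        return -1;
    }

    int main(void)
    {
        printf("%s\n", wait_for_bit(0x4, 100) ? "timed out" : "pending seen");
        return 0;
    }
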
@@ -157,6 +162,57 @@ int sumo_get_temp(struct radeon_device *rdev)
157 return actual_temp * 1000; 162 return actual_temp * 1000;
158} 163}
159 164
165void sumo_pm_init_profile(struct radeon_device *rdev)
166{
167 int idx;
168
169 /* default */
170 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_ps_idx = rdev->pm.default_power_state_index;
171 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_ps_idx = rdev->pm.default_power_state_index;
172 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
173 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 0;
174
175 /* low,mid sh/mh */
176 if (rdev->flags & RADEON_IS_MOBILITY)
177 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
178 else
179 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
180
181 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
182 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
183 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
184 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
185
186 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
187 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
188 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
189 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
190
191 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
192 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
193 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
194 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 0;
195
196 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
197 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
198 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
199 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 0;
200
201 /* high sh/mh */
202 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
203 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
204 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
205 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
206 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx =
207 rdev->pm.power_state[idx].num_clock_modes - 1;
208
209 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
210 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
211 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
212 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx =
213 rdev->pm.power_state[idx].num_clock_modes - 1;
214}
215
160void evergreen_pm_misc(struct radeon_device *rdev) 216void evergreen_pm_misc(struct radeon_device *rdev)
161{ 217{
162 int req_ps_idx = rdev->pm.requested_power_state_index; 218 int req_ps_idx = rdev->pm.requested_power_state_index;
@@ -1219,7 +1275,7 @@ void evergreen_mc_program(struct radeon_device *rdev)
1219 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, 1275 WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
1220 rdev->mc.vram_end >> 12); 1276 rdev->mc.vram_end >> 12);
1221 } 1277 }
1222 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0); 1278 WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, rdev->vram_scratch.gpu_addr >> 12);
1223 if (rdev->flags & RADEON_IS_IGP) { 1279 if (rdev->flags & RADEON_IS_IGP) {
1224 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF; 1280 tmp = RREG32(MC_FUS_VM_FB_OFFSET) & 0x000FFFFF;
1225 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24; 1281 tmp |= ((rdev->mc.vram_end >> 20) & 0xF) << 24;
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7fdfa8ea7570..cd4590aae154 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -38,6 +38,7 @@ struct evergreen_cs_track {
38 u32 group_size; 38 u32 group_size;
39 u32 nbanks; 39 u32 nbanks;
40 u32 npipes; 40 u32 npipes;
41 u32 row_size;
41 /* value we track */ 42 /* value we track */
42 u32 nsamples; 43 u32 nsamples;
43 u32 cb_color_base_last[12]; 44 u32 cb_color_base_last[12];
@@ -77,6 +78,44 @@ struct evergreen_cs_track {
77 struct radeon_bo *db_s_write_bo; 78 struct radeon_bo *db_s_write_bo;
78}; 79};
79 80
81static u32 evergreen_cs_get_aray_mode(u32 tiling_flags)
82{
83 if (tiling_flags & RADEON_TILING_MACRO)
84 return ARRAY_2D_TILED_THIN1;
85 else if (tiling_flags & RADEON_TILING_MICRO)
86 return ARRAY_1D_TILED_THIN1;
87 else
88 return ARRAY_LINEAR_GENERAL;
89}
90
91static u32 evergreen_cs_get_num_banks(u32 nbanks)
92{
93 switch (nbanks) {
94 case 2:
95 return ADDR_SURF_2_BANK;
96 case 4:
97 return ADDR_SURF_4_BANK;
98 case 8:
99 default:
100 return ADDR_SURF_8_BANK;
101 case 16:
102 return ADDR_SURF_16_BANK;
103 }
104}
105
106static u32 evergreen_cs_get_tile_split(u32 row_size)
107{
108 switch (row_size) {
109 case 1:
110 default:
111 return ADDR_SURF_TILE_SPLIT_1KB;
112 case 2:
113 return ADDR_SURF_TILE_SPLIT_2KB;
114 case 4:
115 return ADDR_SURF_TILE_SPLIT_4KB;
116 }
117}
118
80static void evergreen_cs_track_init(struct evergreen_cs_track *track) 119static void evergreen_cs_track_init(struct evergreen_cs_track *track)
81{ 120{
82 int i; 121 int i;
@@ -480,21 +519,22 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
480 } 519 }
481 break; 520 break;
482 case DB_Z_INFO: 521 case DB_Z_INFO:
483 r = evergreen_cs_packet_next_reloc(p, &reloc);
484 if (r) {
485 dev_warn(p->dev, "bad SET_CONTEXT_REG "
486 "0x%04X\n", reg);
487 return -EINVAL;
488 }
489 track->db_z_info = radeon_get_ib_value(p, idx); 522 track->db_z_info = radeon_get_ib_value(p, idx);
490 ib[idx] &= ~Z_ARRAY_MODE(0xf); 523 if (!p->keep_tiling_flags) {
491 track->db_z_info &= ~Z_ARRAY_MODE(0xf); 524 r = evergreen_cs_packet_next_reloc(p, &reloc);
492 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 525 if (r) {
493 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 526 dev_warn(p->dev, "bad SET_CONTEXT_REG "
494 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 527 "0x%04X\n", reg);
495 } else { 528 return -EINVAL;
496 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 529 }
497 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 530 ib[idx] &= ~Z_ARRAY_MODE(0xf);
531 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
532 ib[idx] |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
533 track->db_z_info |= Z_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
534 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
535 ib[idx] |= DB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
536 ib[idx] |= DB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
537 }
498 } 538 }
499 break; 539 break;
500 case DB_STENCIL_INFO: 540 case DB_STENCIL_INFO:
@@ -607,40 +647,34 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
607 case CB_COLOR5_INFO: 647 case CB_COLOR5_INFO:
608 case CB_COLOR6_INFO: 648 case CB_COLOR6_INFO:
609 case CB_COLOR7_INFO: 649 case CB_COLOR7_INFO:
610 r = evergreen_cs_packet_next_reloc(p, &reloc);
611 if (r) {
612 dev_warn(p->dev, "bad SET_CONTEXT_REG "
613 "0x%04X\n", reg);
614 return -EINVAL;
615 }
616 tmp = (reg - CB_COLOR0_INFO) / 0x3c; 650 tmp = (reg - CB_COLOR0_INFO) / 0x3c;
617 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 651 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
618 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 652 if (!p->keep_tiling_flags) {
619 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 653 r = evergreen_cs_packet_next_reloc(p, &reloc);
620 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 654 if (r) {
621 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 655 dev_warn(p->dev, "bad SET_CONTEXT_REG "
622 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 656 "0x%04X\n", reg);
623 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 657 return -EINVAL;
658 }
659 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
660 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
624 } 661 }
625 break; 662 break;
626 case CB_COLOR8_INFO: 663 case CB_COLOR8_INFO:
627 case CB_COLOR9_INFO: 664 case CB_COLOR9_INFO:
628 case CB_COLOR10_INFO: 665 case CB_COLOR10_INFO:
629 case CB_COLOR11_INFO: 666 case CB_COLOR11_INFO:
630 r = evergreen_cs_packet_next_reloc(p, &reloc);
631 if (r) {
632 dev_warn(p->dev, "bad SET_CONTEXT_REG "
633 "0x%04X\n", reg);
634 return -EINVAL;
635 }
636 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; 667 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
637 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 668 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
638 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 669 if (!p->keep_tiling_flags) {
639 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 670 r = evergreen_cs_packet_next_reloc(p, &reloc);
640 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 671 if (r) {
641 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 672 dev_warn(p->dev, "bad SET_CONTEXT_REG "
642 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 673 "0x%04X\n", reg);
643 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 674 return -EINVAL;
675 }
676 ib[idx] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
677 track->cb_color_info[tmp] |= CB_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
644 } 678 }
645 break; 679 break;
646 case CB_COLOR0_PITCH: 680 case CB_COLOR0_PITCH:
@@ -695,6 +729,16 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
695 case CB_COLOR9_ATTRIB: 729 case CB_COLOR9_ATTRIB:
696 case CB_COLOR10_ATTRIB: 730 case CB_COLOR10_ATTRIB:
697 case CB_COLOR11_ATTRIB: 731 case CB_COLOR11_ATTRIB:
732 r = evergreen_cs_packet_next_reloc(p, &reloc);
733 if (r) {
734 dev_warn(p->dev, "bad SET_CONTEXT_REG "
735 "0x%04X\n", reg);
736 return -EINVAL;
737 }
738 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
739 ib[idx] |= CB_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
740 ib[idx] |= CB_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
741 }
698 break; 742 break;
699 case CB_COLOR0_DIM: 743 case CB_COLOR0_DIM:
700 case CB_COLOR1_DIM: 744 case CB_COLOR1_DIM:
@@ -1311,10 +1355,16 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
1311 return -EINVAL; 1355 return -EINVAL;
1312 } 1356 }
1313 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1357 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1314 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1358 if (!p->keep_tiling_flags) {
1315 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 1359 ib[idx+1+(i*8)+1] |=
1316 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1360 TEX_ARRAY_MODE(evergreen_cs_get_aray_mode(reloc->lobj.tiling_flags));
1317 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 1361 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
1362 ib[idx+1+(i*8)+6] |=
1363 TEX_TILE_SPLIT(evergreen_cs_get_tile_split(track->row_size));
1364 ib[idx+1+(i*8)+7] |=
1365 TEX_NUM_BANKS(evergreen_cs_get_num_banks(track->nbanks));
1366 }
1367 }
1318 texture = reloc->robj; 1368 texture = reloc->robj;
1319 /* tex mip base */ 1369 /* tex mip base */
1320 r = evergreen_cs_packet_next_reloc(p, &reloc); 1370 r = evergreen_cs_packet_next_reloc(p, &reloc);
@@ -1414,6 +1464,7 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
1414{ 1464{
1415 struct radeon_cs_packet pkt; 1465 struct radeon_cs_packet pkt;
1416 struct evergreen_cs_track *track; 1466 struct evergreen_cs_track *track;
1467 u32 tmp;
1417 int r; 1468 int r;
1418 1469
1419 if (p->track == NULL) { 1470 if (p->track == NULL) {
@@ -1422,9 +1473,63 @@ int evergreen_cs_parse(struct radeon_cs_parser *p)
1422 if (track == NULL) 1473 if (track == NULL)
1423 return -ENOMEM; 1474 return -ENOMEM;
1424 evergreen_cs_track_init(track); 1475 evergreen_cs_track_init(track);
1425 track->npipes = p->rdev->config.evergreen.tiling_npipes; 1476 if (p->rdev->family >= CHIP_CAYMAN)
1426 track->nbanks = p->rdev->config.evergreen.tiling_nbanks; 1477 tmp = p->rdev->config.cayman.tile_config;
1427 track->group_size = p->rdev->config.evergreen.tiling_group_size; 1478 else
1479 tmp = p->rdev->config.evergreen.tile_config;
1480
1481 switch (tmp & 0xf) {
1482 case 0:
1483 track->npipes = 1;
1484 break;
1485 case 1:
1486 default:
1487 track->npipes = 2;
1488 break;
1489 case 2:
1490 track->npipes = 4;
1491 break;
1492 case 3:
1493 track->npipes = 8;
1494 break;
1495 }
1496
1497 switch ((tmp & 0xf0) >> 4) {
1498 case 0:
1499 track->nbanks = 4;
1500 break;
1501 case 1:
1502 default:
1503 track->nbanks = 8;
1504 break;
1505 case 2:
1506 track->nbanks = 16;
1507 break;
1508 }
1509
1510 switch ((tmp & 0xf00) >> 8) {
1511 case 0:
1512 track->group_size = 256;
1513 break;
1514 case 1:
1515 default:
1516 track->group_size = 512;
1517 break;
1518 }
1519
1520 switch ((tmp & 0xf000) >> 12) {
1521 case 0:
1522 track->row_size = 1;
1523 break;
1524 case 1:
1525 default:
1526 track->row_size = 2;
1527 break;
1528 case 2:
1529 track->row_size = 4;
1530 break;
1531 }
1532
1428 p->track = track; 1533 p->track = track;
1429 } 1534 }
1430 do { 1535 do {
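
evergreen_cs_parse() now derives the tracker's npipes/nbanks/group_size/row_size from a single tile_config word (Cayman's or Evergreen's, by family) instead of per-field config copies, and row_size is new state feeding the tile-split helper above. The nibble layout matches what dce4_crtc_do_set_base() decodes: bits 3:0 pipes, 7:4 banks, 11:8 group size, 15:12 row size. A worked decode, assuming tile_config = 0x1121:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t tmp = 0x1121;            /* assumed tile_config value */
        static const int pipes_tab[] = { 1, 2, 4, 8 };
        static const int banks_tab[] = { 4, 8, 16 };
        unsigned f;

        f = tmp & 0xf;
        printf("npipes   = %d\n", f <= 3 ? pipes_tab[f] : 2);        /* 2 */
        f = (tmp & 0xf0) >> 4;
        printf("nbanks   = %d\n", f <= 2 ? banks_tab[f] : 8);        /* 16 */
        f = (tmp & 0xf00) >> 8;
        printf("group    = %d\n", f == 0 ? 256 : 512);               /* 512 */
        f = (tmp & 0xf000) >> 12;
        printf("row_size = %dKB\n", f == 0 ? 1 : f == 2 ? 4 : 2);    /* 2KB */
        return 0;
    }
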
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index c781c92c3451..7d7f2155e34c 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -42,6 +42,17 @@
42# define EVERGREEN_GRPH_DEPTH_8BPP 0 42# define EVERGREEN_GRPH_DEPTH_8BPP 0
43# define EVERGREEN_GRPH_DEPTH_16BPP 1 43# define EVERGREEN_GRPH_DEPTH_16BPP 1
44# define EVERGREEN_GRPH_DEPTH_32BPP 2 44# define EVERGREEN_GRPH_DEPTH_32BPP 2
45# define EVERGREEN_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
46# define EVERGREEN_ADDR_SURF_2_BANK 0
47# define EVERGREEN_ADDR_SURF_4_BANK 1
48# define EVERGREEN_ADDR_SURF_8_BANK 2
49# define EVERGREEN_ADDR_SURF_16_BANK 3
50# define EVERGREEN_GRPH_Z(x) (((x) & 0x3) << 4)
51# define EVERGREEN_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
52# define EVERGREEN_ADDR_SURF_BANK_WIDTH_1 0
53# define EVERGREEN_ADDR_SURF_BANK_WIDTH_2 1
54# define EVERGREEN_ADDR_SURF_BANK_WIDTH_4 2
55# define EVERGREEN_ADDR_SURF_BANK_WIDTH_8 3
45# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8) 56# define EVERGREEN_GRPH_FORMAT(x) (((x) & 0x7) << 8)
46/* 8 BPP */ 57/* 8 BPP */
47# define EVERGREEN_GRPH_FORMAT_INDEXED 0 58# define EVERGREEN_GRPH_FORMAT_INDEXED 0
@@ -61,6 +72,24 @@
61# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5 72# define EVERGREEN_GRPH_FORMAT_8B_BGRA1010102 5
62# define EVERGREEN_GRPH_FORMAT_RGB111110 6 73# define EVERGREEN_GRPH_FORMAT_RGB111110 6
63# define EVERGREEN_GRPH_FORMAT_BGR101111 7 74# define EVERGREEN_GRPH_FORMAT_BGR101111 7
75# define EVERGREEN_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
76# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_1 0
77# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_2 1
78# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_4 2
79# define EVERGREEN_ADDR_SURF_BANK_HEIGHT_8 3
80# define EVERGREEN_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
81# define EVERGREEN_ADDR_SURF_TILE_SPLIT_64B 0
82# define EVERGREEN_ADDR_SURF_TILE_SPLIT_128B 1
83# define EVERGREEN_ADDR_SURF_TILE_SPLIT_256B 2
84# define EVERGREEN_ADDR_SURF_TILE_SPLIT_512B 3
85# define EVERGREEN_ADDR_SURF_TILE_SPLIT_1KB 4
86# define EVERGREEN_ADDR_SURF_TILE_SPLIT_2KB 5
87# define EVERGREEN_ADDR_SURF_TILE_SPLIT_4KB 6
88# define EVERGREEN_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
89# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_1 0
90# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_2 1
91# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_4 2
92# define EVERGREEN_ADDR_SURF_MACRO_TILE_ASPECT_8 3
64# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20) 93# define EVERGREEN_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
65# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0 94# define EVERGREEN_GRPH_ARRAY_LINEAR_GENERAL 0
66# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1 95# define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED 1
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index b937c49054d9..e00039e59a75 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -899,6 +899,10 @@
899#define DB_HTILE_DATA_BASE 0x28014 899#define DB_HTILE_DATA_BASE 0x28014
900#define DB_Z_INFO 0x28040 900#define DB_Z_INFO 0x28040
901# define Z_ARRAY_MODE(x) ((x) << 4) 901# define Z_ARRAY_MODE(x) ((x) << 4)
902# define DB_TILE_SPLIT(x) (((x) & 0x7) << 8)
903# define DB_NUM_BANKS(x) (((x) & 0x3) << 12)
904# define DB_BANK_WIDTH(x) (((x) & 0x3) << 16)
905# define DB_BANK_HEIGHT(x) (((x) & 0x3) << 20)
902#define DB_STENCIL_INFO 0x28044 906#define DB_STENCIL_INFO 0x28044
903#define DB_Z_READ_BASE 0x28048 907#define DB_Z_READ_BASE 0x28048
904#define DB_STENCIL_READ_BASE 0x2804c 908#define DB_STENCIL_READ_BASE 0x2804c
@@ -951,6 +955,29 @@
951# define CB_SF_EXPORT_FULL 0 955# define CB_SF_EXPORT_FULL 0
952# define CB_SF_EXPORT_NORM 1 956# define CB_SF_EXPORT_NORM 1
953#define CB_COLOR0_ATTRIB 0x28c74 957#define CB_COLOR0_ATTRIB 0x28c74
958# define CB_TILE_SPLIT(x) (((x) & 0x7) << 5)
959# define ADDR_SURF_TILE_SPLIT_64B 0
960# define ADDR_SURF_TILE_SPLIT_128B 1
961# define ADDR_SURF_TILE_SPLIT_256B 2
962# define ADDR_SURF_TILE_SPLIT_512B 3
963# define ADDR_SURF_TILE_SPLIT_1KB 4
964# define ADDR_SURF_TILE_SPLIT_2KB 5
965# define ADDR_SURF_TILE_SPLIT_4KB 6
966# define CB_NUM_BANKS(x) (((x) & 0x3) << 10)
967# define ADDR_SURF_2_BANK 0
968# define ADDR_SURF_4_BANK 1
969# define ADDR_SURF_8_BANK 2
970# define ADDR_SURF_16_BANK 3
971# define CB_BANK_WIDTH(x) (((x) & 0x3) << 13)
972# define ADDR_SURF_BANK_WIDTH_1 0
973# define ADDR_SURF_BANK_WIDTH_2 1
974# define ADDR_SURF_BANK_WIDTH_4 2
975# define ADDR_SURF_BANK_WIDTH_8 3
976# define CB_BANK_HEIGHT(x) (((x) & 0x3) << 16)
977# define ADDR_SURF_BANK_HEIGHT_1 0
978# define ADDR_SURF_BANK_HEIGHT_2 1
979# define ADDR_SURF_BANK_HEIGHT_4 2
980# define ADDR_SURF_BANK_HEIGHT_8 3
954#define CB_COLOR0_DIM 0x28c78 981#define CB_COLOR0_DIM 0x28c78
955/* only CB0-7 blocks have these regs */ 982/* only CB0-7 blocks have these regs */
956#define CB_COLOR0_CMASK 0x28c7c 983#define CB_COLOR0_CMASK 0x28c7c
@@ -1137,7 +1164,11 @@
1137# define SQ_SEL_1 5 1164# define SQ_SEL_1 5
1138#define SQ_TEX_RESOURCE_WORD5_0 0x30014 1165#define SQ_TEX_RESOURCE_WORD5_0 0x30014
1139#define SQ_TEX_RESOURCE_WORD6_0 0x30018 1166#define SQ_TEX_RESOURCE_WORD6_0 0x30018
1167# define TEX_TILE_SPLIT(x) (((x) & 0x7) << 29)
1140#define SQ_TEX_RESOURCE_WORD7_0 0x3001c 1168#define SQ_TEX_RESOURCE_WORD7_0 0x3001c
1169# define TEX_BANK_WIDTH(x) (((x) & 0x3) << 8)
1170# define TEX_BANK_HEIGHT(x) (((x) & 0x3) << 10)
1171# define TEX_NUM_BANKS(x) (((x) & 0x3) << 16)
1141 1172
1142#define SQ_VTX_CONSTANT_WORD0_0 0x30000 1173#define SQ_VTX_CONSTANT_WORD0_0 0x30000
1143#define SQ_VTX_CONSTANT_WORD1_0 0x30004 1174#define SQ_VTX_CONSTANT_WORD1_0 0x30004
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ad158ea49901..bfc08f6320f8 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -187,13 +187,18 @@ u32 r100_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
187{ 187{
188 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 188 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
189 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK; 189 u32 tmp = ((u32)crtc_base) | RADEON_CRTC_OFFSET__OFFSET_LOCK;
190 int i;
190 191
191 /* Lock the graphics update lock */ 192 /* Lock the graphics update lock */
192 /* update the scanout addresses */ 193 /* update the scanout addresses */
193 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp); 194 WREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset, tmp);
194 195
195 /* Wait for update_pending to go high. */ 196 /* Wait for update_pending to go high. */
196 while (!(RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)); 197 for (i = 0; i < rdev->usec_timeout; i++) {
198 if (RREG32(RADEON_CRTC_OFFSET + radeon_crtc->crtc_offset) & RADEON_CRTC_OFFSET__GUI_TRIG_OFFSET)
199 break;
200 udelay(1);
201 }
197 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n"); 202 DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
198 203
199 /* Unlock the lock, so double-buffering can take place inside vblank */ 204 /* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 400b26df652a..c93bc64707e1 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
701 return r; 701 return r;
702 } 702 }
703 703
704 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 704 if (p->keep_tiling_flags) {
705 tile_flags |= R300_TXO_MACRO_TILE; 705 ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
706 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 706 ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
707 tile_flags |= R300_TXO_MICRO_TILE; 707 } else {
708 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 708 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
709 tile_flags |= R300_TXO_MICRO_TILE_SQUARE; 709 tile_flags |= R300_TXO_MACRO_TILE;
710 710 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
711 tmp = idx_value + ((u32)reloc->lobj.gpu_offset); 711 tile_flags |= R300_TXO_MICRO_TILE;
712 tmp |= tile_flags; 712 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
713 ib[idx] = tmp; 713 tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
714
715 tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
716 tmp |= tile_flags;
717 ib[idx] = tmp;
718 }
714 track->textures[i].robj = reloc->robj; 719 track->textures[i].robj = reloc->robj;
715 track->tex_dirty = true; 720 track->tex_dirty = true;
716 break; 721 break;
@@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
760 /* RB3D_COLORPITCH1 */ 765 /* RB3D_COLORPITCH1 */
761 /* RB3D_COLORPITCH2 */ 766 /* RB3D_COLORPITCH2 */
762 /* RB3D_COLORPITCH3 */ 767 /* RB3D_COLORPITCH3 */
763 r = r100_cs_packet_next_reloc(p, &reloc); 768 if (!p->keep_tiling_flags) {
764 if (r) { 769 r = r100_cs_packet_next_reloc(p, &reloc);
765 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 770 if (r) {
766 idx, reg); 771 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
767 r100_cs_dump_packet(p, pkt); 772 idx, reg);
768 return r; 773 r100_cs_dump_packet(p, pkt);
769 } 774 return r;
775 }
770 776
771 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 777 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
772 tile_flags |= R300_COLOR_TILE_ENABLE; 778 tile_flags |= R300_COLOR_TILE_ENABLE;
773 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 779 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
774 tile_flags |= R300_COLOR_MICROTILE_ENABLE; 780 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
775 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 781 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
776 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; 782 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
777 783
778 tmp = idx_value & ~(0x7 << 16); 784 tmp = idx_value & ~(0x7 << 16);
779 tmp |= tile_flags; 785 tmp |= tile_flags;
780 ib[idx] = tmp; 786 ib[idx] = tmp;
787 }
781 i = (reg - 0x4E38) >> 2; 788 i = (reg - 0x4E38) >> 2;
782 track->cb[i].pitch = idx_value & 0x3FFE; 789 track->cb[i].pitch = idx_value & 0x3FFE;
783 switch (((idx_value >> 21) & 0xF)) { 790 switch (((idx_value >> 21) & 0xF)) {
@@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
843 break; 850 break;
844 case 0x4F24: 851 case 0x4F24:
845 /* ZB_DEPTHPITCH */ 852 /* ZB_DEPTHPITCH */
846 r = r100_cs_packet_next_reloc(p, &reloc); 853 if (!p->keep_tiling_flags) {
847 if (r) { 854 r = r100_cs_packet_next_reloc(p, &reloc);
848 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 855 if (r) {
849 idx, reg); 856 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
850 r100_cs_dump_packet(p, pkt); 857 idx, reg);
851 return r; 858 r100_cs_dump_packet(p, pkt);
852 } 859 return r;
853 860 }
854 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
855 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
856 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
857 tile_flags |= R300_DEPTHMICROTILE_TILED;
858 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
859 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
860 861
861 tmp = idx_value & ~(0x7 << 16); 862 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
862 tmp |= tile_flags; 863 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
863 ib[idx] = tmp; 864 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
865 tile_flags |= R300_DEPTHMICROTILE_TILED;
866 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
867 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
864 868
869 tmp = idx_value & ~(0x7 << 16);
870 tmp |= tile_flags;
871 ib[idx] = tmp;
872 }
865 track->zb.pitch = idx_value & 0x3FFC; 873 track->zb.pitch = idx_value & 0x3FFC;
866 track->zb_dirty = true; 874 track->zb_dirty = true;
867 break; 875 break;
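
The common shape of these r300 hunks, and of the r600_cs and evergreen_cs hunks in this series, is the new keep_tiling_flags parser flag: when userspace asserts it, the checker must leave the tiling bits in the IB exactly as submitted and, for most of these registers, must not consume a relocation either, because none was emitted for them. Reduced to a sketch (apply_tile_flags() is a hypothetical stand-in for the per-register bit fixup):

    if (!p->keep_tiling_flags) {
        r = r100_cs_packet_next_reloc(p, &reloc);    /* reloc exists only here */
        if (r)
            return r;
        ib[idx] = apply_tile_flags(idx_value, reloc->lobj.tiling_flags);
    }
    /* with the flag set, ib[idx] is trusted exactly as userspace wrote it */
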
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 19afc43ad173..9cdda0b3b081 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -288,24 +288,6 @@ void r600_pm_get_dynpm_state(struct radeon_device *rdev)
288 pcie_lanes); 288 pcie_lanes);
289} 289}
290 290
291static int r600_pm_get_type_index(struct radeon_device *rdev,
292 enum radeon_pm_state_type ps_type,
293 int instance)
294{
295 int i;
296 int found_instance = -1;
297
298 for (i = 0; i < rdev->pm.num_power_states; i++) {
299 if (rdev->pm.power_state[i].type == ps_type) {
300 found_instance++;
301 if (found_instance == instance)
302 return i;
303 }
304 }
305 /* return default if no match */
306 return rdev->pm.default_power_state_index;
307}
308
309void rs780_pm_init_profile(struct radeon_device *rdev) 291void rs780_pm_init_profile(struct radeon_device *rdev)
310{ 292{
311 if (rdev->pm.num_power_states == 2) { 293 if (rdev->pm.num_power_states == 2) {
@@ -421,6 +403,8 @@ void rs780_pm_init_profile(struct radeon_device *rdev)
421 403
422void r600_pm_init_profile(struct radeon_device *rdev) 404void r600_pm_init_profile(struct radeon_device *rdev)
423{ 405{
406 int idx;
407
424 if (rdev->family == CHIP_R600) { 408 if (rdev->family == CHIP_R600) {
425 /* XXX */ 409 /* XXX */
426 /* default */ 410 /* default */
@@ -502,81 +486,43 @@ void r600_pm_init_profile(struct radeon_device *rdev)
502 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0; 486 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_off_cm_idx = 0;
503 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2; 487 rdev->pm.profiles[PM_PROFILE_DEFAULT_IDX].dpms_on_cm_idx = 2;
504 /* low sh */ 488 /* low sh */
505 if (rdev->flags & RADEON_IS_MOBILITY) { 489 if (rdev->flags & RADEON_IS_MOBILITY)
506 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = 490 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
507 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 491 else
508 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = 492 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
509 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 493 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx = idx;
510 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0; 494 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx = idx;
511 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0; 495 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
512 } else { 496 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
513 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_ps_idx =
514 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
515 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_ps_idx =
516 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
517 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_off_cm_idx = 0;
518 rdev->pm.profiles[PM_PROFILE_LOW_SH_IDX].dpms_on_cm_idx = 0;
519 }
520 /* mid sh */ 497 /* mid sh */
521 if (rdev->flags & RADEON_IS_MOBILITY) { 498 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = idx;
522 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx = 499 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = idx;
523 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0); 500 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
524 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx = 501 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
525 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 0);
526 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
527 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
528 } else {
529 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_ps_idx =
530 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
531 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_ps_idx =
532 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
533 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_off_cm_idx = 0;
534 rdev->pm.profiles[PM_PROFILE_MID_SH_IDX].dpms_on_cm_idx = 1;
535 }
536 /* high sh */ 502 /* high sh */
537 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = 503 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
538 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0); 504 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_ps_idx = idx;
539 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = 505 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_ps_idx = idx;
540 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
541 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0; 506 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_off_cm_idx = 0;
542 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2; 507 rdev->pm.profiles[PM_PROFILE_HIGH_SH_IDX].dpms_on_cm_idx = 2;
543 /* low mh */ 508 /* low mh */
544 if (rdev->flags & RADEON_IS_MOBILITY) { 509 if (rdev->flags & RADEON_IS_MOBILITY)
545 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = 510 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
546 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 511 else
547 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = 512 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
548 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 513 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx = idx;
549 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0; 514 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx = idx;
550 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0; 515 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
551 } else { 516 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
552 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_ps_idx =
553 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
554 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_ps_idx =
555 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
556 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_off_cm_idx = 0;
557 rdev->pm.profiles[PM_PROFILE_LOW_MH_IDX].dpms_on_cm_idx = 0;
558 }
559 /* mid mh */ 517 /* mid mh */
560 if (rdev->flags & RADEON_IS_MOBILITY) { 518 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = idx;
561 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx = 519 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = idx;
562 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1); 520 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
563 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx = 521 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
564 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_BATTERY, 1);
565 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
566 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
567 } else {
568 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_ps_idx =
569 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
570 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_ps_idx =
571 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
572 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_off_cm_idx = 0;
573 rdev->pm.profiles[PM_PROFILE_MID_MH_IDX].dpms_on_cm_idx = 1;
574 }
575 /* high mh */ 522 /* high mh */
576 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = 523 idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
577 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1); 524 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_ps_idx = idx;
578 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = 525 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_ps_idx = idx;
579 r600_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 1);
580 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0; 526 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_off_cm_idx = 0;
581 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2; 527 rdev->pm.profiles[PM_PROFILE_HIGH_MH_IDX].dpms_on_cm_idx = 2;
582 } 528 }
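
r600_pm_init_profile() (like the new sumo variant above) now computes each tier's power-state index once via radeon_pm_get_type_index() — the same linear scan deleted here as the static r600_pm_get_type_index() and re-exported through radeon.h below — and shares it between the low and mid profiles. A runnable miniature of that lookup (types and names simplified):

    #include <stdio.h>

    enum ps_type { PS_BATTERY, PS_PERFORMANCE };

    /* return the Nth state of a given type, or the default index when
     * there is no match, mirroring the removed helper above */
    static int get_type_index(const enum ps_type *states, int n,
                              enum ps_type type, int instance, int def)
    {
        int i, found = -1;

        for (i = 0; i < n; i++) {
            if (states[i] == type && ++found == instance)
                return i;
        }
        return def;
    }

    int main(void)
    {
        enum ps_type states[] = { PS_PERFORMANCE, PS_BATTERY, PS_PERFORMANCE };

        /* second PERFORMANCE state lives at index 2 */
        printf("%d\n", get_type_index(states, 3, PS_PERFORMANCE, 1, 0));
        return 0;
    }
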
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0a2e023c1557..cb1acffd2430 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
941 track->db_depth_control = radeon_get_ib_value(p, idx); 941 track->db_depth_control = radeon_get_ib_value(p, idx);
942 break; 942 break;
943 case R_028010_DB_DEPTH_INFO: 943 case R_028010_DB_DEPTH_INFO:
944 if (r600_cs_packet_next_is_pkt3_nop(p)) { 944 if (!p->keep_tiling_flags &&
945 r600_cs_packet_next_is_pkt3_nop(p)) {
945 r = r600_cs_packet_next_reloc(p, &reloc); 946 r = r600_cs_packet_next_reloc(p, &reloc);
946 if (r) { 947 if (r) {
947 dev_warn(p->dev, "bad SET_CONTEXT_REG " 948 dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
992 case R_0280B4_CB_COLOR5_INFO: 993 case R_0280B4_CB_COLOR5_INFO:
993 case R_0280B8_CB_COLOR6_INFO: 994 case R_0280B8_CB_COLOR6_INFO:
994 case R_0280BC_CB_COLOR7_INFO: 995 case R_0280BC_CB_COLOR7_INFO:
995 if (r600_cs_packet_next_is_pkt3_nop(p)) { 996 if (!p->keep_tiling_flags &&
997 r600_cs_packet_next_is_pkt3_nop(p)) {
996 r = r600_cs_packet_next_reloc(p, &reloc); 998 r = r600_cs_packet_next_reloc(p, &reloc);
997 if (r) { 999 if (r) {
998 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1000 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
@@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1291 mip_offset <<= 8; 1293 mip_offset <<= 8;
1292 1294
1293 word0 = radeon_get_ib_value(p, idx + 0); 1295 word0 = radeon_get_ib_value(p, idx + 0);
1294 if (tiling_flags & RADEON_TILING_MACRO) 1296 if (!p->keep_tiling_flags) {
1295 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1297 if (tiling_flags & RADEON_TILING_MACRO)
1296 else if (tiling_flags & RADEON_TILING_MICRO) 1298 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1297 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1299 else if (tiling_flags & RADEON_TILING_MICRO)
1300 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1301 }
1298 word1 = radeon_get_ib_value(p, idx + 1); 1302 word1 = radeon_get_ib_value(p, idx + 1);
1299 w0 = G_038000_TEX_WIDTH(word0) + 1; 1303 w0 = G_038000_TEX_WIDTH(word0) + 1;
1300 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1304 h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1621 return -EINVAL; 1625 return -EINVAL;
1622 } 1626 }
1623 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1627 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1624 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1628 if (!p->keep_tiling_flags) {
1625 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1629 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1626 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1630 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1627 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1631 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1632 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1633 }
1628 texture = reloc->robj; 1634 texture = reloc->robj;
1629 /* tex mip base */ 1635 /* tex mip base */
1630 r = r600_cs_packet_next_reloc(p, &reloc); 1636 r = r600_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b316b301152f..8227e76b5c70 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -611,7 +611,8 @@ struct radeon_cs_parser {
611 struct radeon_ib *ib; 611 struct radeon_ib *ib;
612 void *track; 612 void *track;
613 unsigned family; 613 unsigned family;
614 int parser_error; 614 int parser_error;
615 bool keep_tiling_flags;
615}; 616};
616 617
617extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); 618extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
@@ -784,8 +785,7 @@ struct radeon_pm_clock_info {
784 785
785struct radeon_power_state { 786struct radeon_power_state {
786 enum radeon_pm_state_type type; 787 enum radeon_pm_state_type type;
787 /* XXX: use a define for num clock modes */ 788 struct radeon_pm_clock_info *clock_info;
788 struct radeon_pm_clock_info clock_info[8];
789 /* number of valid clock modes in this power state */ 789 /* number of valid clock modes in this power state */
790 int num_clock_modes; 790 int num_clock_modes;
791 struct radeon_pm_clock_info *default_clock_mode; 791 struct radeon_pm_clock_info *default_clock_mode;
@@ -855,6 +855,9 @@ struct radeon_pm {
855 struct device *int_hwmon_dev; 855 struct device *int_hwmon_dev;
856}; 856};
857 857
858int radeon_pm_get_type_index(struct radeon_device *rdev,
859 enum radeon_pm_state_type ps_type,
860 int instance);
858 861
859/* 862/*
860 * Benchmarking 863 * Benchmarking
@@ -1142,6 +1145,48 @@ struct r600_vram_scratch {
1142 u64 gpu_addr; 1145 u64 gpu_addr;
1143}; 1146};
1144 1147
1148
1149/*
1150 * Mutex which allows recursive locking from the same process.
1151 */
1152struct radeon_mutex {
1153 struct mutex mutex;
1154 struct task_struct *owner;
1155 int level;
1156};
1157
1158static inline void radeon_mutex_init(struct radeon_mutex *mutex)
1159{
1160 mutex_init(&mutex->mutex);
1161 mutex->owner = NULL;
1162 mutex->level = 0;
1163}
1164
1165static inline void radeon_mutex_lock(struct radeon_mutex *mutex)
1166{
1167 if (mutex_trylock(&mutex->mutex)) {
1168 /* The mutex was unlocked before, so it's ours now */
1169 mutex->owner = current;
1170 } else if (mutex->owner != current) {
1171 /* Another process locked the mutex, take it */
1172 mutex_lock(&mutex->mutex);
1173 mutex->owner = current;
1174 }
1175 /* Otherwise the mutex was already locked by this process */
1176
1177 mutex->level++;
1178}
1179
1180static inline void radeon_mutex_unlock(struct radeon_mutex *mutex)
1181{
1182 if (--mutex->level > 0)
1183 return;
1184
1185 mutex->owner = NULL;
1186 mutex_unlock(&mutex->mutex);
1187}
1188
1189
1145/* 1190/*
1146 * Core structure, functions and helpers. 1191 * Core structure, functions and helpers.
1147 */ 1192 */
@@ -1197,7 +1242,7 @@ struct radeon_device {
1197 struct radeon_gem gem; 1242 struct radeon_gem gem;
1198 struct radeon_pm pm; 1243 struct radeon_pm pm;
1199 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH]; 1244 uint32_t bios_scratch[RADEON_BIOS_NUM_SCRATCH];
1200 struct mutex cs_mutex; 1245 struct radeon_mutex cs_mutex;
1201 struct radeon_wb wb; 1246 struct radeon_wb wb;
1202 struct radeon_dummy_page dummy_page; 1247 struct radeon_dummy_page dummy_page;
1203 bool gpu_lockup; 1248 bool gpu_lockup;
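
radeon_mutex makes cs_mutex (retyped from struct mutex above) re-entrant for a single task: the first lock takes the underlying mutex and records the owner, nested locks from the same task only bump a depth counter, and the mutex is released when the count falls back to zero. Usage sketch — the exact call sites are outside this diff:

    /* nested locking from one task, as the inline helpers above allow */
    radeon_mutex_lock(&rdev->cs_mutex);      /* level 1: takes mutex->mutex */
    radeon_mutex_lock(&rdev->cs_mutex);      /* level 2: same owner, count only */
    radeon_mutex_unlock(&rdev->cs_mutex);    /* back to level 1, still held */
    radeon_mutex_unlock(&rdev->cs_mutex);    /* level 0: mutex released */
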
diff --git a/drivers/gpu/drm/radeon/radeon_acpi.c b/drivers/gpu/drm/radeon/radeon_acpi.c
index 3f6636bb2d7f..3516a6081dcf 100644
--- a/drivers/gpu/drm/radeon/radeon_acpi.c
+++ b/drivers/gpu/drm/radeon/radeon_acpi.c
@@ -35,7 +35,8 @@ static int radeon_atif_call(acpi_handle handle)
35 35
36 /* Fail only if calling the method fails and ATIF is supported */ 36 /* Fail only if calling the method fails and ATIF is supported */
37 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) { 37 if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
38 printk(KERN_DEBUG "failed to evaluate ATIF got %s\n", acpi_format_exception(status)); 38 DRM_DEBUG_DRIVER("failed to evaluate ATIF got %s\n",
39 acpi_format_exception(status));
39 kfree(buffer.pointer); 40 kfree(buffer.pointer);
40 return 1; 41 return 1;
41 } 42 }
@@ -50,13 +51,13 @@ int radeon_acpi_init(struct radeon_device *rdev)
50 acpi_handle handle; 51 acpi_handle handle;
51 int ret; 52 int ret;
52 53
53 /* No need to proceed if we're sure that ATIF is not supported */
54 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios)
55 return 0;
56
57 /* Get the device handle */ 54 /* Get the device handle */
58 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev); 55 handle = DEVICE_ACPI_HANDLE(&rdev->pdev->dev);
59 56
57 /* No need to proceed if we're sure that ATIF is not supported */
58 if (!ASIC_IS_AVIVO(rdev) || !rdev->bios || !handle)
59 return 0;
60
60 /* Call the ATIF method */ 61 /* Call the ATIF method */
61 ret = radeon_atif_call(handle); 62 ret = radeon_atif_call(handle);
62 if (ret) 63 if (ret)
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e2944566ffea..a2e1eae114ef 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -834,7 +834,7 @@ static struct radeon_asic sumo_asic = {
834 .pm_misc = &evergreen_pm_misc, 834 .pm_misc = &evergreen_pm_misc,
835 .pm_prepare = &evergreen_pm_prepare, 835 .pm_prepare = &evergreen_pm_prepare,
836 .pm_finish = &evergreen_pm_finish, 836 .pm_finish = &evergreen_pm_finish,
837 .pm_init_profile = &rs780_pm_init_profile, 837 .pm_init_profile = &sumo_pm_init_profile,
838 .pm_get_dynpm_state = &r600_pm_get_dynpm_state, 838 .pm_get_dynpm_state = &r600_pm_get_dynpm_state,
839 .pre_page_flip = &evergreen_pre_page_flip, 839 .pre_page_flip = &evergreen_pre_page_flip,
840 .page_flip = &evergreen_page_flip, 840 .page_flip = &evergreen_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 85f14f0337e4..59914842a729 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -413,6 +413,7 @@ extern int evergreen_cs_parse(struct radeon_cs_parser *p);
413extern void evergreen_pm_misc(struct radeon_device *rdev); 413extern void evergreen_pm_misc(struct radeon_device *rdev);
414extern void evergreen_pm_prepare(struct radeon_device *rdev); 414extern void evergreen_pm_prepare(struct radeon_device *rdev);
415extern void evergreen_pm_finish(struct radeon_device *rdev); 415extern void evergreen_pm_finish(struct radeon_device *rdev);
416extern void sumo_pm_init_profile(struct radeon_device *rdev);
416extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc); 417extern void evergreen_pre_page_flip(struct radeon_device *rdev, int crtc);
417extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base); 418extern u32 evergreen_page_flip(struct radeon_device *rdev, int crtc, u64 crtc_base);
418extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc); 419extern void evergreen_post_page_flip(struct radeon_device *rdev, int crtc);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 08d0b94332e6..d24baf30efcb 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -62,6 +62,87 @@ union atom_supported_devices {
     struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
 };
 
+static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
+                                          ATOM_GPIO_I2C_ASSIGMENT *gpio,
+                                          u8 index)
+{
+    /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
+    if ((rdev->family == CHIP_R420) ||
+        (rdev->family == CHIP_R423) ||
+        (rdev->family == CHIP_RV410)) {
+        if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
+            (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
+            (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
+            gpio->ucClkMaskShift = 0x19;
+            gpio->ucDataMaskShift = 0x18;
+        }
+    }
+
+    /* some evergreen boards have bad data for this entry */
+    if (ASIC_IS_DCE4(rdev)) {
+        if ((index == 7) &&
+            (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
+            (gpio->sucI2cId.ucAccess == 0)) {
+            gpio->sucI2cId.ucAccess = 0x97;
+            gpio->ucDataMaskShift = 8;
+            gpio->ucDataEnShift = 8;
+            gpio->ucDataY_Shift = 8;
+            gpio->ucDataA_Shift = 8;
+        }
+    }
+
+    /* some DCE3 boards have bad data for this entry */
+    if (ASIC_IS_DCE3(rdev)) {
+        if ((index == 4) &&
+            (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
+            (gpio->sucI2cId.ucAccess == 0x94))
+            gpio->sucI2cId.ucAccess = 0x14;
+    }
+}
+
+static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
+{
+    struct radeon_i2c_bus_rec i2c;
+
+    memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
+
+    i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+    i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+    i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+    i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+    i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+    i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+    i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+    i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+    i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+    i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+    i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+    i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+    i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+    i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+    i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+    i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+    if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+        i2c.hw_capable = true;
+    else
+        i2c.hw_capable = false;
+
+    if (gpio->sucI2cId.ucAccess == 0xa0)
+        i2c.mm_i2c = true;
+    else
+        i2c.mm_i2c = false;
+
+    i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+    if (i2c.mask_clk_reg)
+        i2c.valid = true;
+    else
+        i2c.valid = false;
+
+    return i2c;
+}
+
 static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
                                                         uint8_t id)
 {
@@ -85,59 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
     for (i = 0; i < num_indices; i++) {
         gpio = &i2c_info->asGPIO_Info[i];
 
-        /* some evergreen boards have bad data for this entry */
-        if (ASIC_IS_DCE4(rdev)) {
-            if ((i == 7) &&
-                (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
-                (gpio->sucI2cId.ucAccess == 0)) {
-                gpio->sucI2cId.ucAccess = 0x97;
-                gpio->ucDataMaskShift = 8;
-                gpio->ucDataEnShift = 8;
-                gpio->ucDataY_Shift = 8;
-                gpio->ucDataA_Shift = 8;
-            }
-        }
-
-        /* some DCE3 boards have bad data for this entry */
-        if (ASIC_IS_DCE3(rdev)) {
-            if ((i == 4) &&
-                (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
-                (gpio->sucI2cId.ucAccess == 0x94))
-                gpio->sucI2cId.ucAccess = 0x14;
-        }
+        radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
 
         if (gpio->sucI2cId.ucAccess == id) {
-            i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
-            i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
-            i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
-            i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
-            i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
-            i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
-            i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
-            i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
-            i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
-            i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
-            i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
-            i2c.en_data_mask = (1 << gpio->ucDataEnShift);
-            i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
-            i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
-            i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
-            i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
-            if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
-                i2c.hw_capable = true;
-            else
-                i2c.hw_capable = false;
-
-            if (gpio->sucI2cId.ucAccess == 0xa0)
-                i2c.mm_i2c = true;
-            else
-                i2c.mm_i2c = false;
-
-            i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
-            if (i2c.mask_clk_reg)
-                i2c.valid = true;
+            i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
             break;
         }
     }
@@ -157,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
     int i, num_indices;
     char stmp[32];
 
-    memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
-
     if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
         i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
@@ -167,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
 
         for (i = 0; i < num_indices; i++) {
             gpio = &i2c_info->asGPIO_Info[i];
-            i2c.valid = false;
-
-            /* some evergreen boards have bad data for this entry */
-            if (ASIC_IS_DCE4(rdev)) {
-                if ((i == 7) &&
-                    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
-                    (gpio->sucI2cId.ucAccess == 0)) {
-                    gpio->sucI2cId.ucAccess = 0x97;
-                    gpio->ucDataMaskShift = 8;
-                    gpio->ucDataEnShift = 8;
-                    gpio->ucDataY_Shift = 8;
-                    gpio->ucDataA_Shift = 8;
-                }
-            }
 
-            /* some DCE3 boards have bad data for this entry */
-            if (ASIC_IS_DCE3(rdev)) {
-                if ((i == 4) &&
-                    (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
-                    (gpio->sucI2cId.ucAccess == 0x94))
-                    gpio->sucI2cId.ucAccess = 0x14;
-            }
+            radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
 
-            i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
-            i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
-            i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
-            i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
-            i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
-            i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
-            i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
-            i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
-            i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
-            i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
-            i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
-            i2c.en_data_mask = (1 << gpio->ucDataEnShift);
-            i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
-            i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
-            i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
-            i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
-            if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
-                i2c.hw_capable = true;
-            else
-                i2c.hw_capable = false;
-
-            if (gpio->sucI2cId.ucAccess == 0xa0)
-                i2c.mm_i2c = true;
-            else
-                i2c.mm_i2c = false;
+            i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
 
-            i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
-            if (i2c.mask_clk_reg) {
-                i2c.valid = true;
+            if (i2c.valid) {
                 sprintf(stmp, "0x%x", i2c.i2c_id);
                 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
             }
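
Note: the two helpers added above replace two near-identical copies of the quirk fix-ups and the GPIO-to-bus-record translation (one in radeon_lookup_i2c_gpio(), one in radeon_atombios_i2c_init()); the new radeon_lookup_i2c_gpio_quirks() also gains an r4xx legacy-mask quirk that neither copy had, and the old open-coded mask_clk_reg test now rides along as i2c.valid. Every call site reduces to the same three-step shape (names from this patch):

    radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);   /* patch bad BIOS data */
    i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);    /* decode the table entry */
    if (i2c.valid)                                  /* i.e. mask_clk_reg != 0 */
        rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
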
@@ -1996,10 +1978,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
         return state_index;
     /* last mode is usually default, array is low to high */
     for (i = 0; i < num_modes; i++) {
+        rdev->pm.power_state[state_index].clock_info =
+            kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+        if (!rdev->pm.power_state[state_index].clock_info)
+            return state_index;
+        rdev->pm.power_state[state_index].num_clock_modes = 1;
         rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
         switch (frev) {
         case 1:
-            rdev->pm.power_state[state_index].num_clock_modes = 1;
             rdev->pm.power_state[state_index].clock_info[0].mclk =
                 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
             rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2035,7 +2021,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
             state_index++;
             break;
         case 2:
-            rdev->pm.power_state[state_index].num_clock_modes = 1;
             rdev->pm.power_state[state_index].clock_info[0].mclk =
                 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
             rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2072,7 +2057,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
             state_index++;
             break;
         case 3:
-            rdev->pm.power_state[state_index].num_clock_modes = 1;
             rdev->pm.power_state[state_index].clock_info[0].mclk =
                 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
             rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2257,7 +2241,7 @@ static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rde
         rdev->pm.default_power_state_index = state_index;
         rdev->pm.power_state[state_index].default_clock_mode =
             &rdev->pm.power_state[state_index].clock_info[mode_index - 1];
-        if (ASIC_IS_DCE5(rdev)) {
+        if (ASIC_IS_DCE5(rdev) && !(rdev->flags & RADEON_IS_IGP)) {
             /* NI chips post without MC ucode, so default clocks are strobe mode only */
             rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk;
             rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk;
@@ -2377,17 +2361,31 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
             le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
             (power_state->v1.ucNonClockStateIndex *
              power_info->pplib.ucNonClockSize));
-        for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
-            clock_info = (union pplib_clock_info *)
-                (mode_info->atom_context->bios + data_offset +
-                 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
-                 (power_state->v1.ucClockStateIndices[j] *
-                  power_info->pplib.ucClockInfoSize));
-            valid = radeon_atombios_parse_pplib_clock_info(rdev,
-                                state_index, mode_index,
-                                clock_info);
-            if (valid)
-                mode_index++;
+        rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+                                 ((power_info->pplib.ucStateEntrySize - 1) ?
+                                  (power_info->pplib.ucStateEntrySize - 1) : 1),
+                                 GFP_KERNEL);
+        if (!rdev->pm.power_state[i].clock_info)
+            return state_index;
+        if (power_info->pplib.ucStateEntrySize - 1) {
+            for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
+                clock_info = (union pplib_clock_info *)
+                    (mode_info->atom_context->bios + data_offset +
+                     le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
+                     (power_state->v1.ucClockStateIndices[j] *
+                      power_info->pplib.ucClockInfoSize));
+                valid = radeon_atombios_parse_pplib_clock_info(rdev,
+                                    state_index, mode_index,
+                                    clock_info);
+                if (valid)
+                    mode_index++;
+            }
+        } else {
+            rdev->pm.power_state[state_index].clock_info[0].mclk =
+                rdev->clock.default_mclk;
+            rdev->pm.power_state[state_index].clock_info[0].sclk =
+                rdev->clock.default_sclk;
+            mode_index++;
         }
         rdev->pm.power_state[state_index].num_clock_modes = mode_index;
         if (mode_index) {
@@ -2456,18 +2454,32 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
         non_clock_array_index = i; /* power_state->v2.nonClockInfoIndex */
         non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
             &non_clock_info_array->nonClockInfo[non_clock_array_index];
-        for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
-            clock_array_index = power_state->v2.clockInfoIndex[j];
-            /* XXX this might be an inagua bug... */
-            if (clock_array_index >= clock_info_array->ucNumEntries)
-                continue;
-            clock_info = (union pplib_clock_info *)
-                &clock_info_array->clockInfo[clock_array_index];
-            valid = radeon_atombios_parse_pplib_clock_info(rdev,
-                            state_index, mode_index,
-                            clock_info);
-            if (valid)
-                mode_index++;
+        rdev->pm.power_state[i].clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
+                             (power_state->v2.ucNumDPMLevels ?
+                              power_state->v2.ucNumDPMLevels : 1),
+                             GFP_KERNEL);
+        if (!rdev->pm.power_state[i].clock_info)
+            return state_index;
+        if (power_state->v2.ucNumDPMLevels) {
+            for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
+                clock_array_index = power_state->v2.clockInfoIndex[j];
+                /* XXX this might be an inagua bug... */
+                if (clock_array_index >= clock_info_array->ucNumEntries)
+                    continue;
+                clock_info = (union pplib_clock_info *)
+                    &clock_info_array->clockInfo[clock_array_index];
+                valid = radeon_atombios_parse_pplib_clock_info(rdev,
+                                state_index, mode_index,
+                                clock_info);
+                if (valid)
+                    mode_index++;
+            }
+        } else {
+            rdev->pm.power_state[state_index].clock_info[0].mclk =
+                rdev->clock.default_mclk;
+            rdev->pm.power_state[state_index].clock_info[0].sclk =
+                rdev->clock.default_sclk;
+            mode_index++;
         }
         rdev->pm.power_state[state_index].num_clock_modes = mode_index;
         if (mode_index) {
@@ -2524,19 +2536,23 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
     } else {
         rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
         if (rdev->pm.power_state) {
-            /* add the default mode */
-            rdev->pm.power_state[state_index].type =
-                POWER_STATE_TYPE_DEFAULT;
-            rdev->pm.power_state[state_index].num_clock_modes = 1;
-            rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
-            rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
-            rdev->pm.power_state[state_index].default_clock_mode =
-                &rdev->pm.power_state[state_index].clock_info[0];
-            rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
-            rdev->pm.power_state[state_index].pcie_lanes = 16;
-            rdev->pm.default_power_state_index = state_index;
-            rdev->pm.power_state[state_index].flags = 0;
-            state_index++;
+            rdev->pm.power_state[0].clock_info =
+                kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+            if (rdev->pm.power_state[0].clock_info) {
+                /* add the default mode */
+                rdev->pm.power_state[state_index].type =
+                    POWER_STATE_TYPE_DEFAULT;
+                rdev->pm.power_state[state_index].num_clock_modes = 1;
+                rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
+                rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk;
+                rdev->pm.power_state[state_index].default_clock_mode =
+                    &rdev->pm.power_state[state_index].clock_info[0];
+                rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
+                rdev->pm.power_state[state_index].pcie_lanes = 16;
+                rdev->pm.default_power_state_index = state_index;
+                rdev->pm.power_state[state_index].flags = 0;
+                state_index++;
+            }
         }
     }
 
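
Note: every power-table parser in this file now allocates clock_info dynamically rather than relying on a fixed in-struct array, and each site uses the same "at least one entry" idiom so clock_info[0] stays addressable even when the BIOS reports zero DPM levels (the code then falls back to rdev->clock.default_mclk/default_sclk). The idiom, extracted (ps is shorthand for &rdev->pm.power_state[i]):

    n = power_state->v2.ucNumDPMLevels;
    ps->clock_info = kzalloc(sizeof(struct radeon_pm_clock_info) *
                             (n ? n : 1), GFP_KERNEL);
    if (!ps->clock_info)
        return state_index;     /* keep whatever states parsed so far */
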
diff --git a/drivers/gpu/drm/radeon/radeon_benchmark.c b/drivers/gpu/drm/radeon/radeon_benchmark.c
index 5cafc90de7f8..17e1a9b2d8fb 100644
--- a/drivers/gpu/drm/radeon/radeon_benchmark.c
+++ b/drivers/gpu/drm/radeon/radeon_benchmark.c
@@ -98,7 +98,7 @@ static void radeon_benchmark_move(struct radeon_device *rdev, unsigned size,
     struct radeon_bo *sobj = NULL;
     uint64_t saddr, daddr;
     int r, n;
-    unsigned int time;
+    int time;
 
     n = RADEON_BENCHMARK_ITERATIONS;
     r = radeon_bo_create(rdev, size, PAGE_SIZE, true, sdomain, &sobj);
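
Note: time switches from unsigned int to plain int because it receives a value that can be negative on error; with an unsigned type, a less-than-zero check can never fire. An illustration of the point (benchmark_move() is a hypothetical stand-in for whatever helper this variable actually receives in this file):

    int time = benchmark_move();    /* hypothetical; may return -errno */
    if (time < 0)                   /* always false if time were unsigned */
        goto out_cleanup;
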
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 8bf83c4b4147..81fc100be7e1 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -2563,14 +2563,17 @@ void radeon_combios_get_power_modes(struct radeon_device *rdev)
 
     /* allocate 2 power states */
     rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * 2, GFP_KERNEL);
-    if (!rdev->pm.power_state) {
-        rdev->pm.default_power_state_index = state_index;
-        rdev->pm.num_power_states = 0;
-
-        rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
-        rdev->pm.current_clock_mode_index = 0;
-        return;
-    }
+    if (rdev->pm.power_state) {
+        /* allocate 1 clock mode per state */
+        rdev->pm.power_state[0].clock_info =
+            kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+        rdev->pm.power_state[1].clock_info =
+            kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
+        if (!rdev->pm.power_state[0].clock_info ||
+            !rdev->pm.power_state[1].clock_info)
+            goto pm_failed;
+    } else
+        goto pm_failed;
 
     /* check for a thermal chip */
     offset = combios_get_table_offset(dev, COMBIOS_OVERDRIVE_INFO_TABLE);
@@ -2735,6 +2738,14 @@ default_mode:
 
     rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
     rdev->pm.current_clock_mode_index = 0;
+    return;
+
+pm_failed:
+    rdev->pm.default_power_state_index = state_index;
+    rdev->pm.num_power_states = 0;
+
+    rdev->pm.current_power_state_index = rdev->pm.default_power_state_index;
+    rdev->pm.current_clock_mode_index = 0;
 }
 
 void radeon_external_tmds_setup(struct drm_encoder *encoder)
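
Note: both failure paths (the power_state array and either of the new clock_info allocations) now converge on one pm_failed label, the usual kernel unwind style: the success path returns early and the label leaves rdev->pm in a consistent "no power states" state. The shape in miniature, with a/b standing in for the two clock_info allocations:

    a = kzalloc(sizeof(*a), GFP_KERNEL);
    b = kzalloc(sizeof(*b), GFP_KERNEL);
    if (!a || !b)
        goto pm_failed;
    /* ... normal setup ... */
    return;

pm_failed:
    rdev->pm.num_power_states = 0;   /* single, consistent cleanup point */
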
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index fae00c0d75aa..29afd71e0840 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
 {
     struct drm_radeon_cs *cs = data;
     uint64_t *chunk_array_ptr;
-    unsigned size, i;
+    unsigned size, i, flags = 0;
 
     if (!cs->num_chunks) {
         return 0;
@@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
             if (p->chunks[i].length_dw == 0)
                 return -EINVAL;
         }
+        if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
+            !p->chunks[i].length_dw) {
+            return -EINVAL;
+        }
 
         p->chunks[i].length_dw = user_chunk.length_dw;
         p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
                        p->chunks[i].user_ptr, size)) {
                 return -EFAULT;
             }
+            if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
+                flags = p->chunks[i].kdata[0];
+            }
         } else {
             p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
             p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
               p->chunks[p->chunk_ib_idx].length_dw);
         return -EINVAL;
     }
+
+    p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
     return 0;
 }
 
@@ -222,7 +231,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
     struct radeon_cs_chunk *ib_chunk;
     int r;
 
-    mutex_lock(&rdev->cs_mutex);
+    radeon_mutex_lock(&rdev->cs_mutex);
     /* initialize parser */
     memset(&parser, 0, sizeof(struct radeon_cs_parser));
     parser.filp = filp;
@@ -233,14 +242,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
     if (r) {
         DRM_ERROR("Failed to initialize parser !\n");
         radeon_cs_parser_fini(&parser, r);
-        mutex_unlock(&rdev->cs_mutex);
+        radeon_mutex_unlock(&rdev->cs_mutex);
         return r;
     }
     r = radeon_ib_get(rdev, &parser.ib);
     if (r) {
         DRM_ERROR("Failed to get ib !\n");
         radeon_cs_parser_fini(&parser, r);
-        mutex_unlock(&rdev->cs_mutex);
+        radeon_mutex_unlock(&rdev->cs_mutex);
         return r;
     }
     r = radeon_cs_parser_relocs(&parser);
@@ -248,7 +257,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         if (r != -ERESTARTSYS)
             DRM_ERROR("Failed to parse relocation %d!\n", r);
         radeon_cs_parser_fini(&parser, r);
-        mutex_unlock(&rdev->cs_mutex);
+        radeon_mutex_unlock(&rdev->cs_mutex);
         return r;
     }
     /* Copy the packet into the IB, the parser will read from the
@@ -260,14 +269,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
     if (r || parser.parser_error) {
         DRM_ERROR("Invalid command stream !\n");
         radeon_cs_parser_fini(&parser, r);
-        mutex_unlock(&rdev->cs_mutex);
+        radeon_mutex_unlock(&rdev->cs_mutex);
         return r;
     }
     r = radeon_cs_finish_pages(&parser);
     if (r) {
         DRM_ERROR("Invalid command stream !\n");
         radeon_cs_parser_fini(&parser, r);
-        mutex_unlock(&rdev->cs_mutex);
+        radeon_mutex_unlock(&rdev->cs_mutex);
         return r;
     }
     r = radeon_ib_schedule(rdev, parser.ib);
@@ -275,7 +284,7 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
         DRM_ERROR("Failed to schedule IB !\n");
     }
     radeon_cs_parser_fini(&parser, r);
-    mutex_unlock(&rdev->cs_mutex);
+    radeon_mutex_unlock(&rdev->cs_mutex);
     return r;
 }
 
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index c33bc914d93d..c4d00a171411 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -716,7 +716,7 @@ int radeon_device_init(struct radeon_device *rdev,
 
     /* mutex initialization are all done here so we
      * can recall function without having locking issues */
-    mutex_init(&rdev->cs_mutex);
+    radeon_mutex_init(&rdev->cs_mutex);
     mutex_init(&rdev->ib_pool.mutex);
     mutex_init(&rdev->cp.mutex);
     mutex_init(&rdev->dc_hw_i2c_mutex);
@@ -955,6 +955,9 @@ int radeon_gpu_reset(struct radeon_device *rdev)
     int r;
     int resched;
 
+    /* Prevent CS ioctl from interfering */
+    radeon_mutex_lock(&rdev->cs_mutex);
+
     radeon_save_bios_scratch_regs(rdev);
     /* block TTM */
     resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
@@ -967,10 +970,15 @@ int radeon_gpu_reset(struct radeon_device *rdev)
         radeon_restore_bios_scratch_regs(rdev);
         drm_helper_resume_force_mode(rdev->ddev);
         ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
-        return 0;
     }
-    /* bad news, how to tell it to userspace ? */
-    dev_info(rdev->dev, "GPU reset failed\n");
+
+    radeon_mutex_unlock(&rdev->cs_mutex);
+
+    if (r) {
+        /* bad news, how to tell it to userspace ? */
+        dev_info(rdev->dev, "GPU reset failed\n");
+    }
+
     return r;
 }
 
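
Note: cs_mutex changes type here because a plain mutex_lock() in radeon_gpu_reset() would self-deadlock whenever a reset is triggered while the CS ioctl already holds the mutex. The radeon_mutex_* wrappers are presumably owner-aware so the reset path can re-enter; one plausible shape of the idea, a sketch only and not necessarily the exact code in radeon.h:

    struct radeon_mutex {
        struct mutex        mutex;
        struct task_struct *owner;
    };

    static inline void radeon_mutex_lock(struct radeon_mutex *m)
    {
        if (mutex_trylock(&m->mutex)) {
            /* mutex was free, it is ours now */
            m->owner = current;
        } else if (m->owner != current) {
            /* held by someone else, wait for it */
            mutex_lock(&m->mutex);
            m->owner = current;
        }
        /* else: we already hold it, tolerate the recursion */
    }
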
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index a0b35e909489..71499fc3daf5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -53,9 +53,10 @@
  * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
  * 2.10.0 - fusion 2D tiling
  * 2.11.0 - backend map, initial compute support for the CS checker
+ * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
  */
 #define KMS_DRIVER_MAJOR    2
-#define KMS_DRIVER_MINOR    11
+#define KMS_DRIVER_MINOR    12
 #define KMS_DRIVER_PATCHLEVEL    0
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
 int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 06e413e6a920..4b27efa4405b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -233,13 +233,12 @@ u16 radeon_encoder_get_dp_bridge_encoder_id(struct drm_encoder *encoder)
         switch (radeon_encoder->encoder_id) {
         case ENCODER_OBJECT_ID_TRAVIS:
         case ENCODER_OBJECT_ID_NUTMEG:
-            return true;
+            return radeon_encoder->encoder_id;
         default:
-            return false;
+            return ENCODER_OBJECT_ID_NONE;
         }
     }
-
-    return false;
+    return ENCODER_OBJECT_ID_NONE;
 }
 
 void radeon_panel_mode_fixup(struct drm_encoder *encoder,
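
Note: radeon_encoder_get_dp_bridge_encoder_id() now returns the bridge encoder object id (u16) instead of a bool, so callers can both test for presence and tell which DP bridge is wired up. Caller sketch:

    /* behavior can differ per bridge chip */
    switch (radeon_encoder_get_dp_bridge_encoder_id(encoder)) {
    case ENCODER_OBJECT_ID_NUTMEG:
        /* DP-to-VGA bridge */
        break;
    case ENCODER_OBJECT_ID_TRAVIS:
        /* DP-to-LVDS bridge */
        break;
    case ENCODER_OBJECT_ID_NONE:
    default:
        /* no bridge present */
        break;
    }
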
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index 41a5d48e657b..daadf2111040 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -991,12 +991,6 @@ static bool radeon_crtc_mode_fixup(struct drm_crtc *crtc,
                                     struct drm_display_mode *mode,
                                     struct drm_display_mode *adjusted_mode)
 {
-    struct drm_device *dev = crtc->dev;
-    struct radeon_device *rdev = dev->dev_private;
-
-    /* adjust pm to upcoming mode change */
-    radeon_pm_compute_clocks(rdev);
-
     if (!radeon_crtc_scaling_mode_fixup(crtc, mode, adjusted_mode))
         return false;
     return true;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6fabe89fa6a1..78a665bd9519 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -53,6 +53,24 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev);
 
 #define ACPI_AC_CLASS           "ac_adapter"
 
+int radeon_pm_get_type_index(struct radeon_device *rdev,
+                             enum radeon_pm_state_type ps_type,
+                             int instance)
+{
+    int i;
+    int found_instance = -1;
+
+    for (i = 0; i < rdev->pm.num_power_states; i++) {
+        if (rdev->pm.power_state[i].type == ps_type) {
+            found_instance++;
+            if (found_instance == instance)
+                return i;
+        }
+    }
+    /* return default if no match */
+    return rdev->pm.default_power_state_index;
+}
+
 #ifdef CONFIG_ACPI
 static int radeon_acpi_event(struct notifier_block *nb,
                              unsigned long val,
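
Note: radeon_pm_get_type_index() returns the index of the instance-th power state of a given type, and deliberately falls back to the default state index rather than an error code, so callers can index rdev->pm.power_state[] without a range check. Typical use, as a sketch:

    /* pick the first "performance" state, or the default if none exists */
    int idx = radeon_pm_get_type_index(rdev, POWER_STATE_TYPE_PERFORMANCE, 0);
    struct radeon_power_state *ps = &rdev->pm.power_state[idx];
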
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 481b99e89f65..b1053d640423 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -62,6 +62,7 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
     struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
     u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+    int i;
 
     /* Lock the graphics update lock */
     tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -74,7 +75,11 @@ u32 rs600_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
            (u32)crtc_base);
 
     /* Wait for update_pending to go high. */
-    while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+    for (i = 0; i < rdev->usec_timeout; i++) {
+        if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+            break;
+        udelay(1);
+    }
     DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
     /* Unlock the lock, so double-buffering can take place inside vblank */
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index a983f410ab89..23ae1c60ab3d 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -47,6 +47,7 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
 {
     struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
     u32 tmp = RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset);
+    int i;
 
     /* Lock the graphics update lock */
     tmp |= AVIVO_D1GRPH_UPDATE_LOCK;
@@ -66,7 +67,11 @@ u32 rv770_page_flip(struct radeon_device *rdev, int crtc_id, u64 crtc_base)
            (u32)crtc_base);
 
     /* Wait for update_pending to go high. */
-    while (!(RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING));
+    for (i = 0; i < rdev->usec_timeout; i++) {
+        if (RREG32(AVIVO_D1GRPH_UPDATE + radeon_crtc->crtc_offset) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
+            break;
+        udelay(1);
+    }
     DRM_DEBUG("Update pending now high. Unlocking vupdate_lock.\n");
 
     /* Unlock the lock, so double-buffering can take place inside vblank */
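
Note: both rs600_page_flip() and rv770_page_flip() replace an unbounded `while (!(RREG32(...) & ...));` spin with a bounded poll, so a flip that never latches can no longer hang the CPU. If more call sites appear, the loop could be factored out; a hypothetical helper, not part of this patch:

    /* hypothetical consolidation, not in this patch */
    static bool avivo_wait_flip_pending(struct radeon_device *rdev, u32 reg)
    {
        int i;

        for (i = 0; i < rdev->usec_timeout; i++) {
            if (RREG32(reg) & AVIVO_D1GRPH_SURFACE_UPDATE_PENDING)
                return true;
            udelay(1);
        }
        return false;   /* timed out; caller may log and continue */
    }
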
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 617b64678fc6..0bb0f5f713e6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -574,10 +574,16 @@ retry:
         return ret;
 
     spin_lock(&glob->lru_lock);
+
+    if (unlikely(list_empty(&bo->ddestroy))) {
+        spin_unlock(&glob->lru_lock);
+        return 0;
+    }
+
     ret = ttm_bo_reserve_locked(bo, interruptible,
                                 no_wait_reserve, false, 0);
 
-    if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
+    if (unlikely(ret != 0)) {
         spin_unlock(&glob->lru_lock);
         return ret;
     }
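
Note: the list_empty() test moves from after ttm_bo_reserve_locked() to immediately after taking lru_lock, so the function no longer reserves a buffer whose delayed destroy has already been handled elsewhere, and the "nothing to do" case now returns an explicit 0. This is the standard re-validate-after-locking pattern for check-then-act races:

    spin_lock(&glob->lru_lock);
    if (unlikely(list_empty(&bo->ddestroy))) {
        /* raced with another cleanup path; nothing left to do */
        spin_unlock(&glob->lru_lock);
        return 0;
    }
    /* only now is it safe to reserve and act on the delayed-destroy state */
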
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 3f6343502d1f..5ff561d4e0b4 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -140,7 +140,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
         goto out_clips;
     }
 
-    clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+    clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
     if (clips == NULL) {
         DRM_ERROR("Failed to allocate clip rect list.\n");
         ret = -ENOMEM;
@@ -232,7 +232,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
         goto out_clips;
     }
 
-    clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+    clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
     if (clips == NULL) {
         DRM_ERROR("Failed to allocate clip rect list.\n");
         ret = -ENOMEM;
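
Note: kcalloc(n, size, flags) fails with NULL when n * size would overflow, whereas the open-coded kzalloc(n * size, flags) silently wraps and under-allocates when num_clips comes from userspace; that is the point of both conversions above (and of the one in vmwgfx_kms.c below):

    /* overflow-checked: returns NULL if num_clips * sizeof(*clips) wraps */
    clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
    if (clips == NULL)
        return -ENOMEM;   /* sketch; the real code jumps to its unwind label */
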
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 03daefa73397..37d40545ed77 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -105,6 +105,10 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
     struct vmw_dma_buffer *dmabuf = NULL;
     int ret;
 
+    /* A lot of the code assumes this */
+    if (handle && (width != 64 || height != 64))
+        return -EINVAL;
+
     if (handle) {
         ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
                                              handle, &surface);
@@ -410,8 +414,9 @@ static int do_surface_dirty_sou(struct vmw_private *dev_priv,
     top = clips->y1;
     bottom = clips->y2;
 
-    clips_ptr = clips;
-    for (i = 1; i < num_clips; i++, clips_ptr += inc) {
+    /* skip the first clip rect */
+    for (i = 1, clips_ptr = clips + inc;
+         i < num_clips; i++, clips_ptr += inc) {
         left = min_t(int, left, (int)clips_ptr->x1);
         right = max_t(int, right, (int)clips_ptr->x2);
         top = min_t(int, top, (int)clips_ptr->y1);
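
Note: the old loop initialized clips_ptr to clips, so iteration 1 re-read rect 0 (which already seeded left/right/top/bottom), and with a stride of inc > 1 every subsequent read was off by one stride and the last intended rect was never unioned. Starting at clips + inc makes the bounding-box union cover exactly rects 1..num_clips-1:

    /* rect 0 seeds the bounds... */
    left = clips->x1;
    right = clips->x2;
    /* ...so the union must start at the *second* rect: clips + inc */
    for (i = 1, clips_ptr = clips + inc; i < num_clips; i++, clips_ptr += inc)
        left = min_t(int, left, (int)clips_ptr->x1);   /* y bounds analogous */
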
@@ -1323,7 +1328,10 @@ int vmw_kms_close(struct vmw_private *dev_priv)
      * drm_encoder_cleanup which takes the lock we deadlock.
      */
     drm_mode_config_cleanup(dev_priv->dev);
-    vmw_kms_close_legacy_display_system(dev_priv);
+    if (dev_priv->sou_priv)
+        vmw_kms_close_screen_object_display(dev_priv);
+    else
+        vmw_kms_close_legacy_display_system(dev_priv);
     return 0;
 }
 
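
Note: teardown now mirrors bring-up. When the screen-object display system initialized (dev_priv->sou_priv non-NULL), the matching close routine runs instead of unconditionally tearing down the legacy path; the init-side names below are an assumption based on this file's conventions, not shown in this patch:

    /* init side, presumably: screen objects first, legacy as fallback */
    if (vmw_kms_init_screen_object_display(dev_priv) != 0)
        vmw_kms_init_legacy_display_system(dev_priv);

    /* close side (this hunk): pick whichever actually initialized */
    if (dev_priv->sou_priv)
        vmw_kms_close_screen_object_display(dev_priv);
    else
        vmw_kms_close_legacy_display_system(dev_priv);
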
@@ -1801,7 +1809,8 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
     }
 
     rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
-    rects = kzalloc(rects_size, GFP_KERNEL);
+    rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
+                    GFP_KERNEL);
     if (unlikely(!rects)) {
         ret = -ENOMEM;
         goto out_unlock;
@@ -1816,10 +1825,10 @@ int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
     }
 
     for (i = 0; i < arg->num_outputs; ++i) {
-        if (rects->x < 0 ||
-            rects->y < 0 ||
-            rects->x + rects->w > mode_config->max_width ||
-            rects->y + rects->h > mode_config->max_height) {
+        if (rects[i].x < 0 ||
+            rects[i].y < 0 ||
+            rects[i].x + rects[i].w > mode_config->max_width ||
+            rects[i].y + rects[i].h > mode_config->max_height) {
             DRM_ERROR("Invalid GUI layout.\n");
             ret = -EINVAL;
             goto out_free;