Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/drm_crtc.c               |   4
-rw-r--r--  drivers/gpu/drm/drm_irq.c                |   9
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c      |  57
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c          |   6
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h          |  19
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c          |  12
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c          |   1
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h          |  21
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c      |   2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c     |  33
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c          | 411
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c       |   3
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_cs.c    |  92
-rw-r--r--  drivers/gpu/drm/radeon/r300.c            |  94
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c         |  26
-rw-r--r--  drivers/gpu/drm/radeon/radeon.h          |   3
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c | 210
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c       |  11
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c      |   3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c             |   8
-rw-r--r--  drivers/gpu/vga/vgaarb.c                 |  62
21 files changed, 620 insertions(+), 467 deletions(-)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 405c63b9d539..8323fc389840 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -1873,6 +1873,10 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 	}
 
 	if (num_clips && clips_ptr) {
+		if (num_clips < 0 || num_clips > DRM_MODE_FB_DIRTY_MAX_CLIPS) {
+			ret = -EINVAL;
+			goto out_err1;
+		}
 		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
 		if (!clips) {
 			ret = -ENOMEM;
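The hunk above validates a user-supplied count before it sizes an allocation; without the check, num_clips * sizeof(*clips) can overflow and under-allocate. A minimal standalone sketch of the same guard, assuming illustrative stand-ins (struct clip and MAX_CLIPS are not the DRM definitions):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct clip { uint16_t x1, y1, x2, y2; };
#define MAX_CLIPS 256 /* stand-in for DRM_MODE_FB_DIRTY_MAX_CLIPS */

/* Range-check the count before it reaches the allocator so the size
 * computation cannot overflow. Returns 0, -EINVAL or -ENOMEM. */
static int alloc_clips(int num_clips, struct clip **out)
{
	struct clip *clips;

	if (num_clips < 0 || num_clips > MAX_CLIPS)
		return -EINVAL;

	clips = calloc(num_clips, sizeof(*clips));
	if (!clips)
		return -ENOMEM;

	*out = clips;
	return 0;
}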
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 68b756253f9f..44a5d0ad8b7c 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -110,10 +110,7 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	/* Prevent vblank irq processing while disabling vblank irqs,
 	 * so no updates of timestamps or count can happen after we've
 	 * disabled. Needed to prevent races in case of delayed irq's.
-	 * Disable preemption, so vblank_time_lock is held as short as
-	 * possible, even under a kernel with PREEMPT_RT patches.
 	 */
-	preempt_disable();
 	spin_lock_irqsave(&dev->vblank_time_lock, irqflags);
 
 	dev->driver->disable_vblank(dev, crtc);
@@ -164,7 +161,6 @@ static void vblank_disable_and_save(struct drm_device *dev, int crtc)
 	clear_vblank_timestamps(dev, crtc);
 
 	spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags);
-	preempt_enable();
 }
 
 static void vblank_disable_fn(unsigned long arg)
@@ -889,10 +885,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
 	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
-		/* Disable preemption while holding vblank_time_lock. Do
-		 * it explicitely to guard against PREEMPT_RT kernel.
-		 */
-		preempt_disable();
 		spin_lock_irqsave(&dev->vblank_time_lock, irqflags2);
 		if (!dev->vblank_enabled[crtc]) {
 			/* Enable vblank irqs under vblank_time_lock protection.
@@ -912,7 +904,6 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 			}
 		}
 		spin_unlock_irqrestore(&dev->vblank_time_lock, irqflags2);
-		preempt_enable();
 	} else {
 		if (!dev->vblank_enabled[crtc]) {
 			atomic_dec(&dev->vblank_refcount[crtc]);
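The preempt_disable()/preempt_enable() pairs removed above were redundant on mainline, where spin_lock_irqsave() already disables preemption along with local interrupts, and they broke PREEMPT_RT, where spinlocks become sleeping locks and preemption must stay enabled across them. A compile-only kernel-style sketch of the resulting locking shape (example_lock and the counter are illustrative, not driver code):

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);
static int example_counter;

static void example_critical_update(void)
{
	unsigned long flags;

	/* spin_lock_irqsave() alone is enough: preemption and local
	 * IRQs are off for the whole critical section. */
	spin_lock_irqsave(&example_lock, flags);
	example_counter++;
	spin_unlock_irqrestore(&example_lock, flags);
}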
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 4f40f1ce1d8e..d09a6e02dc95 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -636,11 +636,16 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_ring_buffer *ring;
+	int ret;
 
 	ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
 	if (ring->size == 0)
 		return 0;
 
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
 	seq_printf(m, "Ring %s:\n", ring->name);
 	seq_printf(m, "  Head :    %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
 	seq_printf(m, "  Tail :    %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
@@ -654,6 +659,8 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
 	seq_printf(m, "  Control : %08x\n", I915_READ_CTL(ring));
 	seq_printf(m, "  Start :   %08x\n", I915_READ_START(ring));
 
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -842,7 +849,16 @@ static int i915_rstdby_delays(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u16 crstanddelay = I915_READ16(CRSTANDVID);
+	u16 crstanddelay;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	crstanddelay = I915_READ16(CRSTANDVID);
+
+	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "w/ctx: %d, w/o ctx: %d\n", (crstanddelay >> 8) & 0x3f, (crstanddelay & 0x3f));
 
@@ -940,7 +956,11 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 delayfreq;
-	int i;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	for (i = 0; i < 16; i++) {
 		delayfreq = I915_READ(PXVFREQ_BASE + i * 4);
@@ -948,6 +968,8 @@ static int i915_delayfreq_table(struct seq_file *m, void *unused)
 			   (delayfreq & PXVFREQ_PX_MASK) >> PXVFREQ_PX_SHIFT);
 	}
 
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -962,13 +984,19 @@ static int i915_inttoext_table(struct seq_file *m, void *unused)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 inttoext;
-	int i;
+	int ret, i;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	for (i = 1; i <= 32; i++) {
 		inttoext = I915_READ(INTTOEXT_BASE_ILK + i * 4);
 		seq_printf(m, "INTTOEXT%02d: 0x%08x\n", i, inttoext);
 	}
 
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
@@ -977,9 +1005,19 @@ static int i915_drpc_info(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 rgvmodectl = I915_READ(MEMMODECTL);
-	u32 rstdbyctl = I915_READ(RSTDBYCTL);
-	u16 crstandvid = I915_READ16(CRSTANDVID);
+	u32 rgvmodectl, rstdbyctl;
+	u16 crstandvid;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	rgvmodectl = I915_READ(MEMMODECTL);
+	rstdbyctl = I915_READ(RSTDBYCTL);
+	crstandvid = I915_READ16(CRSTANDVID);
+
+	mutex_unlock(&dev->struct_mutex);
 
 	seq_printf(m, "HD boost: %s\n", (rgvmodectl & MEMMODE_BOOST_EN) ?
 		   "yes" : "no");
@@ -1167,9 +1205,16 @@ static int i915_gfxec(struct seq_file *m, void *unused)
 	struct drm_info_node *node = (struct drm_info_node *) m->private;
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 	seq_printf(m, "GFXEC: %ld\n", (unsigned long)I915_READ(0x112f4));
 
+	mutex_unlock(&dev->struct_mutex);
+
 	return 0;
 }
 
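Every i915_debugfs.c hunk above applies the same recipe: take struct_mutex with mutex_lock_interruptible() before touching registers through I915_READ*(), so a reader blocked on the debugfs file can still be interrupted by a signal (returning -EINTR), and drop the lock before returning. A condensed kernel-style sketch of the pattern; example_read_hw() and example_hw_mutex are hypothetical stand-ins, not the driver's names:

#include <linux/mutex.h>
#include <linux/seq_file.h>

static DEFINE_MUTEX(example_hw_mutex);

static u32 example_read_hw(void)
{
	return 0;	/* stub standing in for I915_READ()/I915_READ16() */
}

static int example_debugfs_show(struct seq_file *m, void *unused)
{
	u32 val;
	int ret;

	ret = mutex_lock_interruptible(&example_hw_mutex);
	if (ret)
		return ret;	/* -EINTR if the caller was signalled */

	val = example_read_hw();	/* all register access under the lock */

	mutex_unlock(&example_hw_mutex);

	seq_printf(m, "value: 0x%08x\n", val);
	return 0;
}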
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index e9c2cfe45daa..15bfa9145d2b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -68,7 +68,7 @@ module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0600);
 MODULE_PARM_DESC(i915_enable_rc6,
 		"Enable power-saving render C-state 6 (default: true)");
 
-unsigned int i915_enable_fbc __read_mostly = -1;
+int i915_enable_fbc __read_mostly = -1;
 module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600);
 MODULE_PARM_DESC(i915_enable_fbc,
 		"Enable frame buffer compression for power savings "
@@ -80,7 +80,7 @@ MODULE_PARM_DESC(lvds_downclock,
80 "Use panel (LVDS/eDP) downclocking for power savings " 80 "Use panel (LVDS/eDP) downclocking for power savings "
81 "(default: false)"); 81 "(default: false)");
82 82
83unsigned int i915_panel_use_ssc __read_mostly = -1; 83int i915_panel_use_ssc __read_mostly = -1;
84module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); 84module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
85MODULE_PARM_DESC(lvds_use_ssc, 85MODULE_PARM_DESC(lvds_use_ssc,
86 "Use Spread Spectrum Clock with panels [LVDS/eDP] " 86 "Use Spread Spectrum Clock with panels [LVDS/eDP] "
@@ -107,7 +107,7 @@ static struct drm_driver driver;
 extern int intel_agp_enabled;
 
 #define INTEL_VGA_DEVICE(id, info) {		\
-	.class = PCI_CLASS_DISPLAY_VGA << 8,	\
+	.class = PCI_BASE_CLASS_DISPLAY << 16,	\
 	.class_mask = 0xff0000,			\
 	.vendor = 0x8086,			\
 	.device = id,				\
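The INTEL_VGA_DEVICE change above is about stating intent: PCI class codes are 24 bits (base class, subclass, programming interface), and with class_mask = 0xff0000 only the base-class byte is compared, so PCI_CLASS_DISPLAY_VGA << 8 and PCI_BASE_CLASS_DISPLAY << 16 are the same masked value (0x030000) and both match any display-class function, VGA or not. A simplified sketch of the comparison the PCI core performs (mirroring, not quoting, its matching logic):

#include <stdbool.h>
#include <stdint.h>

/* dev_class is the 24-bit class/subclass/prog-if word from config space. */
static bool class_matches(uint32_t id_class, uint32_t id_class_mask,
			  uint32_t dev_class)
{
	return (id_class & id_class_mask) == (dev_class & id_class_mask);
}

/* With id_class = 0x030000 and mask 0xff0000, both 0x030000 (VGA) and
 * 0x038000 ("other display controller") devices match; spelling the id
 * as a base class makes that behavior explicit. */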
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 06a37f4fd74b..4a9c1b979804 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -126,6 +126,9 @@ struct drm_i915_master_private {
 	struct _drm_i915_sarea *sarea_priv;
 };
 #define I915_FENCE_REG_NONE -1
+#define I915_MAX_NUM_FENCES 16
+/* 16 fences + sign bit for FENCE_REG_NONE */
+#define I915_MAX_NUM_FENCE_BITS 5
 
 struct drm_i915_fence_reg {
 	struct list_head lru_list;
@@ -168,7 +171,7 @@ struct drm_i915_error_state {
 	u32 instdone1;
 	u32 seqno;
 	u64 bbaddr;
-	u64 fence[16];
+	u64 fence[I915_MAX_NUM_FENCES];
 	struct timeval time;
 	struct drm_i915_error_object {
 		int page_count;
@@ -182,7 +185,7 @@ struct drm_i915_error_state {
 		u32 gtt_offset;
 		u32 read_domains;
 		u32 write_domain;
-		s32 fence_reg:5;
+		s32 fence_reg:I915_MAX_NUM_FENCE_BITS;
 		s32 pinned:2;
 		u32 tiling:2;
 		u32 dirty:1;
@@ -375,7 +378,7 @@ typedef struct drm_i915_private {
 	struct notifier_block lid_notifier;
 
 	int crt_ddc_pin;
-	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
+	struct drm_i915_fence_reg fence_regs[I915_MAX_NUM_FENCES]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 
@@ -506,7 +509,7 @@ typedef struct drm_i915_private {
 	u8 saveAR[21];
 	u8 saveDACMASK;
 	u8 saveCR[37];
-	uint64_t saveFENCE[16];
+	uint64_t saveFENCE[I915_MAX_NUM_FENCES];
 	u32 saveCURACNTR;
 	u32 saveCURAPOS;
 	u32 saveCURABASE;
@@ -777,10 +780,8 @@ struct drm_i915_gem_object {
 	 * Fence register bits (if any) for this object.  Will be set
 	 * as needed when mapped into the GTT.
 	 * Protected by dev->struct_mutex.
-	 *
-	 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
 	 */
-	signed int fence_reg:5;
+	signed int fence_reg:I915_MAX_NUM_FENCE_BITS;
 
 	/**
 	 * Advice: are the backing pages purgeable?
@@ -999,10 +1000,10 @@ extern int i915_panel_ignore_lid __read_mostly;
 extern unsigned int i915_powersave __read_mostly;
 extern unsigned int i915_semaphores __read_mostly;
 extern unsigned int i915_lvds_downclock __read_mostly;
-extern unsigned int i915_panel_use_ssc __read_mostly;
+extern int i915_panel_use_ssc __read_mostly;
 extern int i915_vbt_sdvo_panel_type __read_mostly;
 extern unsigned int i915_enable_rc6 __read_mostly;
-extern unsigned int i915_enable_fbc __read_mostly;
+extern int i915_enable_fbc __read_mostly;
 extern bool i915_enable_hangcheck __read_mostly;
 
 extern int i915_suspend(struct drm_device *dev, pm_message_t state);
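The signedness changes in this header (and the matching definitions in i915_drv.c above) are load-bearing: both parameters use -1 as an "unset, pick a per-chip default" sentinel, and stored in an unsigned int, -1 wraps to UINT_MAX, so any `param < 0` default-detection test is constant-false and the fallback logic never fires. A short userspace demonstration of the trap:

#include <stdio.h>

int main(void)
{
	unsigned int u = -1;	/* wraps to UINT_MAX */
	int s = -1;

	printf("unsigned sentinel detected: %d\n", (int)(u < 0));	/* 0 */
	printf("signed sentinel detected:   %d\n", (int)(s < 0));	/* 1 */
	return 0;
}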
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d18b07adcffa..8359dc777041 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -1745,7 +1745,7 @@ static void i915_gem_reset_fences(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int i;
 
-	for (i = 0; i < 16; i++) {
+	for (i = 0; i < dev_priv->num_fence_regs; i++) {
 		struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
 		struct drm_i915_gem_object *obj = reg->obj;
 
@@ -3512,9 +3512,11 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 		 * so emit a request to do so.
 		 */
 		request = kzalloc(sizeof(*request), GFP_KERNEL);
-		if (request)
+		if (request) {
 			ret = i915_add_request(obj->ring, NULL, request);
-		else
+			if (ret)
+				kfree(request);
+		} else
 			ret = -ENOMEM;
 	}
 
@@ -3613,7 +3615,7 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
 	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
 	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
 
-	if (IS_GEN6(dev)) {
+	if (IS_GEN6(dev) || IS_GEN7(dev)) {
 		/* On Gen6, we can have the GPU use the LLC (the CPU
 		 * cache) for about a 10% performance improvement
 		 * compared to uncached.  Graphics requests other than
@@ -3877,7 +3879,7 @@ i915_gem_load(struct drm_device *dev)
 	INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
 	for (i = 0; i < I915_NUM_RINGS; i++)
 		init_ring_lists(&dev_priv->ring[i]);
-	for (i = 0; i < 16; i++)
+	for (i = 0; i < I915_MAX_NUM_FENCES; i++)
 		INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
 	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
 			  i915_gem_retire_work_handler);
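The busy-ioctl hunk above plugs a small leak: i915_add_request() only takes ownership of the request when it succeeds, so on failure the caller must free what it allocated. The general consume-on-success shape, as a standalone sketch (submit() is a hypothetical stand-in, stubbed to fail so the cleanup path runs):

#include <errno.h>
#include <stdlib.h>

struct request { int seqno; };

/* Hypothetical: consumes req only when it returns 0. */
static int submit(struct request *req)
{
	(void)req;
	return -EIO;	/* stub failure to exercise the cleanup path */
}

static int queue_flush(void)
{
	struct request *request;
	int ret;

	request = calloc(1, sizeof(*request));
	if (!request)
		return -ENOMEM;

	ret = submit(request);
	if (ret)
		free(request);	/* not consumed: ownership stayed with us */

	return ret;
}

int main(void) { return queue_flush() ? 1 : 0; }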
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 9ee2729fe5c6..b40004b55977 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -824,6 +824,7 @@ static void i915_gem_record_fences(struct drm_device *dev,
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 7:
 	case 6:
 		for (i = 0; i < 16; i++)
 			error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 5a09416e611f..b080cc824001 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -1553,12 +1553,21 @@
  */
 #define   PP_READY			(1 << 30)
 #define   PP_SEQUENCE_NONE		(0 << 28)
-#define   PP_SEQUENCE_ON		(1 << 28)
-#define   PP_SEQUENCE_OFF		(2 << 28)
-#define   PP_SEQUENCE_MASK		0x30000000
+#define   PP_SEQUENCE_POWER_UP		(1 << 28)
+#define   PP_SEQUENCE_POWER_DOWN	(2 << 28)
+#define   PP_SEQUENCE_MASK		(3 << 28)
+#define   PP_SEQUENCE_SHIFT		28
 #define   PP_CYCLE_DELAY_ACTIVE		(1 << 27)
-#define   PP_SEQUENCE_STATE_ON_IDLE	(1 << 3)
 #define   PP_SEQUENCE_STATE_MASK	0x0000000f
+#define   PP_SEQUENCE_STATE_OFF_IDLE	(0x0 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_1	(0x1 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_2	(0x2 << 0)
+#define   PP_SEQUENCE_STATE_OFF_S0_3	(0x3 << 0)
+#define   PP_SEQUENCE_STATE_ON_IDLE	(0x8 << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_0	(0x9 << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_2	(0xa << 0)
+#define   PP_SEQUENCE_STATE_ON_S1_3	(0xb << 0)
+#define   PP_SEQUENCE_STATE_RESET	(0xf << 0)
 #define PP_CONTROL	0x61204
 #define   POWER_TARGET_ON	(1 << 0)
 #define PP_ON_DELAYS	0x61208
@@ -3444,6 +3453,10 @@
 #define GT_FIFO_FREE_ENTRIES			0x120008
 #define GT_FIFO_NUM_RESERVED_ENTRIES		20
 
+#define GEN6_UCGCTL2				0x9404
+# define GEN6_RCPBUNIT_CLOCK_GATE_DISABLE	(1 << 12)
+# define GEN6_RCCUNIT_CLOCK_GATE_DISABLE	(1 << 11)
+
 #define GEN6_RPNSWREQ				0xA008
 #define   GEN6_TURBO_DISABLE			(1<<31)
 #define   GEN6_FREQUENCY(x)			((x)<<25)
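The expanded PP_SEQUENCE_* definitions above make the panel-power status register fully decodable: bits 29:28 report power-up/power-down progress and bits 3:0 the sequencer's detailed state. A hedged userspace sketch of decoding such a word, with the constants copied from this hunk's values (PP_ON is bit 31 in the same register file; decode_pp_status() is illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

#define PP_ON			(1u << 31)
#define PP_SEQUENCE_POWER_UP	(1u << 28)
#define PP_SEQUENCE_POWER_DOWN	(2u << 28)
#define PP_SEQUENCE_MASK	(3u << 28)
#define PP_SEQUENCE_STATE_MASK	0x0000000fu

static void decode_pp_status(uint32_t status)
{
	printf("panel %s, ", (status & PP_ON) ? "on" : "off");

	switch (status & PP_SEQUENCE_MASK) {
	case PP_SEQUENCE_POWER_UP:
		printf("sequencer powering up, ");
		break;
	case PP_SEQUENCE_POWER_DOWN:
		printf("sequencer powering down, ");
		break;
	default:
		printf("sequencer idle, ");
		break;
	}

	printf("state 0x%x\n", status & PP_SEQUENCE_STATE_MASK);
}

int main(void)
{
	decode_pp_status(PP_ON | 0x8u);	/* on, ON_IDLE */
	return 0;
}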
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index f8f602d76650..7886e4fb60e3 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -370,6 +370,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 7:
 	case 6:
 		for (i = 0; i < 16; i++)
 			dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
@@ -404,6 +405,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 
 	/* Fences */
 	switch (INTEL_INFO(dev)->gen) {
+	case 7:
 	case 6:
 		for (i = 0; i < 16; i++)
 			I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 981b1f1c04d8..e77a863a3833 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2933,7 +2933,8 @@ static void ironlake_pch_enable(struct drm_crtc *crtc)
 
 	/* For PCH DP, enable TRANS_DP_CTL */
 	if (HAS_PCH_CPT(dev) &&
-	    intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
+	    (intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT) ||
+	     intel_pipe_has_type(crtc, INTEL_OUTPUT_EDP))) {
 		u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPE_BPC_MASK) >> 5;
 		reg = TRANS_DP_CTL(pipe);
 		temp = I915_READ(reg);
@@ -4711,7 +4712,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 			lvds_bpc = 6;
 
 		if (lvds_bpc < display_bpc) {
-			DRM_DEBUG_DRIVER("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
+			DRM_DEBUG_KMS("clamping display bpc (was %d) to LVDS (%d)\n", display_bpc, lvds_bpc);
 			display_bpc = lvds_bpc;
 		}
 		continue;
@@ -4722,7 +4723,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 		unsigned int edp_bpc = dev_priv->edp.bpp / 3;
 
 		if (edp_bpc < display_bpc) {
-			DRM_DEBUG_DRIVER("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
+			DRM_DEBUG_KMS("clamping display bpc (was %d) to eDP (%d)\n", display_bpc, edp_bpc);
 			display_bpc = edp_bpc;
 		}
 		continue;
@@ -4737,7 +4738,7 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 		/* Don't use an invalid EDID bpc value */
 		if (connector->display_info.bpc &&
 		    connector->display_info.bpc < display_bpc) {
-			DRM_DEBUG_DRIVER("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
+			DRM_DEBUG_KMS("clamping display bpc (was %d) to EDID reported max of %d\n", display_bpc, connector->display_info.bpc);
 			display_bpc = connector->display_info.bpc;
 		}
 	}
@@ -4748,10 +4749,10 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 	 */
 	if (intel_encoder->type == INTEL_OUTPUT_HDMI) {
 		if (display_bpc > 8 && display_bpc < 12) {
-			DRM_DEBUG_DRIVER("forcing bpc to 12 for HDMI\n");
+			DRM_DEBUG_KMS("forcing bpc to 12 for HDMI\n");
 			display_bpc = 12;
 		} else {
-			DRM_DEBUG_DRIVER("forcing bpc to 8 for HDMI\n");
+			DRM_DEBUG_KMS("forcing bpc to 8 for HDMI\n");
 			display_bpc = 8;
 		}
 	}
@@ -4789,8 +4790,8 @@ static bool intel_choose_pipe_bpp_dither(struct drm_crtc *crtc,
 
 	display_bpc = min(display_bpc, bpc);
 
-	DRM_DEBUG_DRIVER("setting pipe bpc to %d (max display bpc %d)\n",
-			 bpc, display_bpc);
+	DRM_DEBUG_KMS("setting pipe bpc to %d (max display bpc %d)\n",
+		      bpc, display_bpc);
 
 	*pipe_bpp = display_bpc * 3;
 
@@ -5671,7 +5672,7 @@ static int ironlake_crtc_mode_set(struct drm_crtc *crtc,
 	pipeconf &= ~PIPECONF_DITHER_TYPE_MASK;
 	if ((is_lvds && dev_priv->lvds_dither) || dither) {
 		pipeconf |= PIPECONF_DITHER_EN;
-		pipeconf |= PIPECONF_DITHER_TYPE_ST1;
+		pipeconf |= PIPECONF_DITHER_TYPE_SP;
 	}
 	if (is_dp || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
 		intel_dp_set_m_n(crtc, mode, adjusted_mode);
@@ -8148,6 +8149,20 @@ static void gen6_init_clock_gating(struct drm_device *dev)
 	I915_WRITE(WM2_LP_ILK, 0);
 	I915_WRITE(WM1_LP_ILK, 0);
 
+	/* According to the BSpec vol1g, bit 12 (RCPBUNIT) clock
+	 * gating disable must be set.  Failure to set it results in
+	 * flickering pixels due to Z write ordering failures after
+	 * some amount of runtime in the Mesa "fire" demo, and Unigine
+	 * Sanctuary and Tropics, and apparently anything else with
+	 * alpha test or pixel discard.
+	 *
+	 * According to the spec, bit 11 (RCCUNIT) must also be set,
+	 * but we didn't debug actual testcases to find it out.
+	 */
+	I915_WRITE(GEN6_UCGCTL2,
+		   GEN6_RCPBUNIT_CLOCK_GATE_DISABLE |
+		   GEN6_RCCUNIT_CLOCK_GATE_DISABLE);
+
 	/*
 	 * According to the spec the following bits should be
 	 * set in order to enable memory self-refresh and fbc:
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 09b318b0227f..4d0358fad937 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -59,7 +59,6 @@ struct intel_dp {
 	struct i2c_algo_dp_aux_data algo;
 	bool is_pch_edp;
 	uint8_t train_set[4];
-	uint8_t link_status[DP_LINK_STATUS_SIZE];
 	int panel_power_up_delay;
 	int panel_power_down_delay;
 	int panel_power_cycle_delay;
@@ -68,7 +67,6 @@ struct intel_dp {
 	struct drm_display_mode *panel_fixed_mode;  /* for eDP */
 	struct delayed_work panel_vdd_work;
 	bool want_panel_vdd;
-	unsigned long panel_off_jiffies;
 };
 
 /**
@@ -157,16 +155,12 @@ intel_edp_link_config(struct intel_encoder *intel_encoder,
 static int
 intel_dp_max_lane_count(struct intel_dp *intel_dp)
 {
-	int max_lane_count = 4;
-
-	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) {
-		max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
-		switch (max_lane_count) {
-		case 1: case 2: case 4:
-			break;
-		default:
-			max_lane_count = 4;
-		}
+	int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
+	switch (max_lane_count) {
+	case 1: case 2: case 4:
+		break;
+	default:
+		max_lane_count = 4;
 	}
 	return max_lane_count;
 }
@@ -768,12 +762,11 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 			continue;
 
 		intel_dp = enc_to_intel_dp(encoder);
-		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) {
+		if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
+		    intel_dp->base.type == INTEL_OUTPUT_EDP)
+		{
 			lane_count = intel_dp->lane_count;
 			break;
-		} else if (is_edp(intel_dp)) {
-			lane_count = dev_priv->edp.lanes;
-			break;
 		}
 	}
 
@@ -810,6 +803,7 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 			  struct drm_display_mode *adjusted_mode)
 {
 	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 	struct drm_crtc *crtc = intel_dp->base.base.crtc;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -822,18 +816,31 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		ironlake_edp_pll_off(encoder);
 	}
 
-	intel_dp->DP = DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
-	intel_dp->DP |= intel_dp->color_range;
+	/*
+	 * There are three kinds of DP registers:
+	 *
+	 * 	IBX PCH
+	 * 	CPU
+	 * 	CPT PCH
+	 *
+	 * IBX PCH and CPU are the same for almost everything,
+	 * except that the CPU DP PLL is configured in this
+	 * register
+	 *
+	 * CPT PCH is quite different, having many bits moved
+	 * to the TRANS_DP_CTL register instead. That
+	 * configuration happens (oddly) in ironlake_pch_enable
+	 */
 
-	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
-		intel_dp->DP |= DP_SYNC_HS_HIGH;
-	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
-		intel_dp->DP |= DP_SYNC_VS_HIGH;
+	/* Preserve the BIOS-computed detected bit. This is
+	 * supposed to be read-only.
+	 */
+	intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
-	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
-		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
-	else
-		intel_dp->DP |= DP_LINK_TRAIN_OFF;
+	/* Handle DP bits in common between all three register formats */
+
+	intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
 
 	switch (intel_dp->lane_count) {
 	case 1:
@@ -852,59 +859,106 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
 		intel_write_eld(encoder, adjusted_mode);
 	}
-
 	memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
 	intel_dp->link_configuration[0] = intel_dp->link_bw;
 	intel_dp->link_configuration[1] = intel_dp->lane_count;
 	intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B;
-
 	/*
 	 * Check for DPCD version > 1.1 and enhanced framing support
 	 */
 	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
 	    (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) {
 		intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-		intel_dp->DP |= DP_ENHANCED_FRAMING;
 	}
 
-	/* CPT DP's pipe select is decided in TRANS_DP_CTL */
-	if (intel_crtc->pipe == 1 && !HAS_PCH_CPT(dev))
-		intel_dp->DP |= DP_PIPEB_SELECT;
+	/* Split out the IBX/CPU vs CPT settings */
 
-	if (is_cpu_edp(intel_dp)) {
-		/* don't miss out required setting for eDP */
-		intel_dp->DP |= DP_PLL_ENABLE;
-		if (adjusted_mode->clock < 200000)
-			intel_dp->DP |= DP_PLL_FREQ_160MHZ;
-		else
-			intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+	if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
+		intel_dp->DP |= intel_dp->color_range;
+
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
+			intel_dp->DP |= DP_SYNC_HS_HIGH;
+		if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
+			intel_dp->DP |= DP_SYNC_VS_HIGH;
+		intel_dp->DP |= DP_LINK_TRAIN_OFF;
+
+		if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN)
+			intel_dp->DP |= DP_ENHANCED_FRAMING;
+
+		if (intel_crtc->pipe == 1)
+			intel_dp->DP |= DP_PIPEB_SELECT;
+
+		if (is_cpu_edp(intel_dp)) {
+			/* don't miss out required setting for eDP */
+			intel_dp->DP |= DP_PLL_ENABLE;
+			if (adjusted_mode->clock < 200000)
+				intel_dp->DP |= DP_PLL_FREQ_160MHZ;
+			else
+				intel_dp->DP |= DP_PLL_FREQ_270MHZ;
+		}
+	} else {
+		intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
 	}
 }
 
-static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+#define IDLE_ON_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_ON_VALUE		(PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE)
+
+#define IDLE_OFF_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK)
+#define IDLE_OFF_VALUE		(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+#define IDLE_CYCLE_MASK		(PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK)
+#define IDLE_CYCLE_VALUE	(0     | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE)
+
+static void ironlake_wait_panel_status(struct intel_dp *intel_dp,
+				       u32 mask,
+				       u32 value)
 {
-	unsigned long off_time;
-	unsigned long delay;
+	struct drm_device *dev = intel_dp->base.base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	DRM_DEBUG_KMS("Wait for panel power off time\n");
+	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
+		      mask, value,
+		      I915_READ(PCH_PP_STATUS),
+		      I915_READ(PCH_PP_CONTROL));
 
-	if (ironlake_edp_have_panel_power(intel_dp) ||
-	    ironlake_edp_have_panel_vdd(intel_dp))
-	{
-		DRM_DEBUG_KMS("Panel still on, no delay needed\n");
-		return;
+	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
+		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
+			  I915_READ(PCH_PP_STATUS),
+			  I915_READ(PCH_PP_CONTROL));
 	}
+}
 
-	off_time = intel_dp->panel_off_jiffies + msecs_to_jiffies(intel_dp->panel_power_down_delay);
-	if (time_after(jiffies, off_time)) {
-		DRM_DEBUG_KMS("Time already passed");
-		return;
-	}
-	delay = jiffies_to_msecs(off_time - jiffies);
-	if (delay > intel_dp->panel_power_down_delay)
-		delay = intel_dp->panel_power_down_delay;
-	DRM_DEBUG_KMS("Waiting an additional %ld ms\n", delay);
-	msleep(delay);
+static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power on\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
+}
+
+static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power off time\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
+}
+
+static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
+{
+	DRM_DEBUG_KMS("Wait for panel power cycle\n");
+	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
+}
+
+
+/* Read the current pp_control value, unlocking the register if it
+ * is locked
+ */
+
+static u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
+{
+	u32 control = I915_READ(PCH_PP_CONTROL);
+
+	control &= ~PANEL_UNLOCK_MASK;
+	control |= PANEL_UNLOCK_REGS;
+	return control;
 }
 
 static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
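The refactor above replaces three hand-rolled waits with one mask/value poll: each IDLE_*_MASK/IDLE_*_VALUE pair names which PP_STATUS bits to examine and what they must read. The driver does this through its _wait_for() macro (5000 ms timeout, 10 ms poll interval); a standalone sketch of the same poll-until-match shape, with read_status() as a stub for the register read:

#include <stdbool.h>
#include <stdint.h>
#include <unistd.h>

static uint32_t read_status(void)
{
	return 0;	/* stub standing in for I915_READ(PCH_PP_STATUS) */
}

/* Poll until (status & mask) == value or timeout_ms elapses. */
static bool wait_for_status(uint32_t mask, uint32_t value,
			    unsigned int timeout_ms, unsigned int poll_ms)
{
	unsigned int waited;

	for (waited = 0; waited <= timeout_ms; waited += poll_ms) {
		if ((read_status() & mask) == value)
			return true;
		usleep(poll_ms * 1000);
	}
	return false;
}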
@@ -921,15 +975,16 @@ static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
921 "eDP VDD already requested on\n"); 975 "eDP VDD already requested on\n");
922 976
923 intel_dp->want_panel_vdd = true; 977 intel_dp->want_panel_vdd = true;
978
924 if (ironlake_edp_have_panel_vdd(intel_dp)) { 979 if (ironlake_edp_have_panel_vdd(intel_dp)) {
925 DRM_DEBUG_KMS("eDP VDD already on\n"); 980 DRM_DEBUG_KMS("eDP VDD already on\n");
926 return; 981 return;
927 } 982 }
928 983
929 ironlake_wait_panel_off(intel_dp); 984 if (!ironlake_edp_have_panel_power(intel_dp))
930 pp = I915_READ(PCH_PP_CONTROL); 985 ironlake_wait_panel_power_cycle(intel_dp);
931 pp &= ~PANEL_UNLOCK_MASK; 986
932 pp |= PANEL_UNLOCK_REGS; 987 pp = ironlake_get_pp_control(dev_priv);
933 pp |= EDP_FORCE_VDD; 988 pp |= EDP_FORCE_VDD;
934 I915_WRITE(PCH_PP_CONTROL, pp); 989 I915_WRITE(PCH_PP_CONTROL, pp);
935 POSTING_READ(PCH_PP_CONTROL); 990 POSTING_READ(PCH_PP_CONTROL);
@@ -952,9 +1007,7 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
 	u32 pp;
 
 	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
-		pp = I915_READ(PCH_PP_CONTROL);
-		pp &= ~PANEL_UNLOCK_MASK;
-		pp |= PANEL_UNLOCK_REGS;
+		pp = ironlake_get_pp_control(dev_priv);
 		pp &= ~EDP_FORCE_VDD;
 		I915_WRITE(PCH_PP_CONTROL, pp);
 		POSTING_READ(PCH_PP_CONTROL);
@@ -962,7 +1015,8 @@ static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
 		/* Make sure sequencer is idle before allowing subsequent activity */
 		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
 			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));
-		intel_dp->panel_off_jiffies = jiffies;
+
+		msleep(intel_dp->panel_power_down_delay);
 	}
 }
 
@@ -972,9 +1026,9 @@ static void ironlake_panel_vdd_work(struct work_struct *__work)
 						 struct intel_dp, panel_vdd_work);
 	struct drm_device *dev = intel_dp->base.base.dev;
 
-	mutex_lock(&dev->struct_mutex);
+	mutex_lock(&dev->mode_config.mutex);
 	ironlake_panel_vdd_off_sync(intel_dp);
-	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
 }
 
 static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
@@ -984,7 +1038,7 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 
 	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
 	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");
-	
+
 	intel_dp->want_panel_vdd = false;
 
 	if (sync) {
@@ -1000,23 +1054,25 @@ static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
 	}
 }
 
-/* Returns true if the panel was already on when called */
 static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 {
 	struct drm_device *dev = intel_dp->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp, idle_on_mask = PP_ON | PP_SEQUENCE_STATE_ON_IDLE;
+	u32 pp;
 
 	if (!is_edp(intel_dp))
 		return;
-	if (ironlake_edp_have_panel_power(intel_dp))
+
+	DRM_DEBUG_KMS("Turn eDP power on\n");
+
+	if (ironlake_edp_have_panel_power(intel_dp)) {
+		DRM_DEBUG_KMS("eDP power already on\n");
 		return;
+	}
 
-	ironlake_wait_panel_off(intel_dp);
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
+	ironlake_wait_panel_power_cycle(intel_dp);
 
+	pp = ironlake_get_pp_control(dev_priv);
 	if (IS_GEN5(dev)) {
 		/* ILK workaround: disable reset around power sequence */
 		pp &= ~PANEL_POWER_RESET;
@@ -1025,13 +1081,13 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 	}
 
 	pp |= POWER_TARGET_ON;
+	if (!IS_GEN5(dev))
+		pp |= PANEL_POWER_RESET;
+
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
 
-	if (wait_for((I915_READ(PCH_PP_STATUS) & idle_on_mask) == idle_on_mask,
-		     5000))
-		DRM_ERROR("panel on wait timed out: 0x%08x\n",
-			  I915_READ(PCH_PP_STATUS));
+	ironlake_wait_panel_on(intel_dp);
 
 	if (IS_GEN5(dev)) {
 		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
@@ -1040,46 +1096,25 @@ static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
 	}
 }
 
-static void ironlake_edp_panel_off(struct drm_encoder *encoder)
+static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
 {
-	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-	struct drm_device *dev = encoder->dev;
+	struct drm_device *dev = intel_dp->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 pp, idle_off_mask = PP_ON | PP_SEQUENCE_MASK |
-		PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK;
+	u32 pp;
 
 	if (!is_edp(intel_dp))
 		return;
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
 
-	if (IS_GEN5(dev)) {
-		/* ILK workaround: disable reset around power sequence */
-		pp &= ~PANEL_POWER_RESET;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-	}
+	DRM_DEBUG_KMS("Turn eDP power off\n");
 
-	intel_dp->panel_off_jiffies = jiffies;
+	WARN(intel_dp->want_panel_vdd, "Cannot turn power off while VDD is on\n");
 
-	if (IS_GEN5(dev)) {
-		pp &= ~POWER_TARGET_ON;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-		pp &= ~POWER_TARGET_ON;
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-		msleep(intel_dp->panel_power_cycle_delay);
-
-		if (wait_for((I915_READ(PCH_PP_STATUS) & idle_off_mask) == 0, 5000))
-			DRM_ERROR("panel off wait timed out: 0x%08x\n",
-				  I915_READ(PCH_PP_STATUS));
+	pp = ironlake_get_pp_control(dev_priv);
+	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+	I915_WRITE(PCH_PP_CONTROL, pp);
+	POSTING_READ(PCH_PP_CONTROL);
 
-		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
-		I915_WRITE(PCH_PP_CONTROL, pp);
-		POSTING_READ(PCH_PP_CONTROL);
-	}
+	ironlake_wait_panel_off(intel_dp);
 }
 
 static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1099,9 +1134,7 @@ static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
 	 * allowing it to appear.
 	 */
 	msleep(intel_dp->backlight_on_delay);
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
+	pp = ironlake_get_pp_control(dev_priv);
 	pp |= EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
@@ -1117,9 +1150,7 @@ static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
 		return;
 
 	DRM_DEBUG_KMS("\n");
-	pp = I915_READ(PCH_PP_CONTROL);
-	pp &= ~PANEL_UNLOCK_MASK;
-	pp |= PANEL_UNLOCK_REGS;
+	pp = ironlake_get_pp_control(dev_priv);
 	pp &= ~EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 	POSTING_READ(PCH_PP_CONTROL);
@@ -1187,17 +1218,18 @@ static void intel_dp_prepare(struct drm_encoder *encoder)
 {
 	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
 
+	ironlake_edp_backlight_off(intel_dp);
+	ironlake_edp_panel_off(intel_dp);
+
 	/* Wake up the sink first */
 	ironlake_edp_panel_vdd_on(intel_dp);
 	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
+	intel_dp_link_down(intel_dp);
 	ironlake_edp_panel_vdd_off(intel_dp, false);
 
 	/* Make sure the panel is off before trying to
 	 * change the mode
 	 */
-	ironlake_edp_backlight_off(intel_dp);
-	intel_dp_link_down(intel_dp);
-	ironlake_edp_panel_off(encoder);
 }
 
 static void intel_dp_commit(struct drm_encoder *encoder)
@@ -1211,7 +1243,6 @@ static void intel_dp_commit(struct drm_encoder *encoder)
 	intel_dp_start_link_train(intel_dp);
 	ironlake_edp_panel_on(intel_dp);
 	ironlake_edp_panel_vdd_off(intel_dp, true);
-
 	intel_dp_complete_link_train(intel_dp);
 	ironlake_edp_backlight_on(intel_dp);
 
@@ -1230,16 +1261,20 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 	uint32_t dp_reg = I915_READ(intel_dp->output_reg);
 
 	if (mode != DRM_MODE_DPMS_ON) {
+		ironlake_edp_backlight_off(intel_dp);
+		ironlake_edp_panel_off(intel_dp);
+
 		ironlake_edp_panel_vdd_on(intel_dp);
-		if (is_edp(intel_dp))
-			ironlake_edp_backlight_off(intel_dp);
 		intel_dp_sink_dpms(intel_dp, mode);
 		intel_dp_link_down(intel_dp);
-		ironlake_edp_panel_off(encoder);
-		if (is_edp(intel_dp) && !is_pch_edp(intel_dp))
-			ironlake_edp_pll_off(encoder);
 		ironlake_edp_panel_vdd_off(intel_dp, false);
+
+		if (is_cpu_edp(intel_dp))
+			ironlake_edp_pll_off(encoder);
 	} else {
+		if (is_cpu_edp(intel_dp))
+			ironlake_edp_pll_on(encoder);
+
 		ironlake_edp_panel_vdd_on(intel_dp);
 		intel_dp_sink_dpms(intel_dp, mode);
 		if (!(dp_reg & DP_PORT_EN)) {
@@ -1247,7 +1282,6 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 			ironlake_edp_panel_on(intel_dp);
 			ironlake_edp_panel_vdd_off(intel_dp, true);
 			intel_dp_complete_link_train(intel_dp);
-			ironlake_edp_backlight_on(intel_dp);
 		} else
 			ironlake_edp_panel_vdd_off(intel_dp, false);
 		ironlake_edp_backlight_on(intel_dp);
@@ -1285,11 +1319,11 @@ intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
  * link status information
  */
 static bool
-intel_dp_get_link_status(struct intel_dp *intel_dp)
+intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
 	return intel_dp_aux_native_read_retry(intel_dp,
 					      DP_LANE0_1_STATUS,
-					      intel_dp->link_status,
+					      link_status,
 					      DP_LINK_STATUS_SIZE);
 }
 
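This hunk starts a refactor that runs through the rest of the file: the six-byte DPCD link status is no longer cached in struct intel_dp (the field was deleted at the top of this diff) but read into a caller-owned buffer and passed down, so every training decision sees fresh data. A sketch of the new calling convention, assuming stand-ins (struct dp and the stubbed AUX read are not the driver's types):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define DP_LINK_STATUS_SIZE 6

struct dp { int lane_count; };	/* abbreviated stand-in for struct intel_dp */

/* Stub for the AUX read starting at DP_LANE0_1_STATUS; fills six bytes. */
static bool dp_read_link_status(struct dp *dp,
				uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	(void)dp;
	memset(link_status, 0, DP_LINK_STATUS_SIZE);
	return true;
}

static bool channel_eq_ok(struct dp *dp)
{
	uint8_t link_status[DP_LINK_STATUS_SIZE];	/* fresh, stack-owned */

	if (!dp_read_link_status(dp, link_status))
		return false;
	/* ... inspect per-lane and alignment bits in link_status[] ... */
	return true;
}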
@@ -1301,27 +1335,25 @@ intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 }
 
 static uint8_t
-intel_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_voltage(uint8_t adjust_request[2],
 				 int lane)
 {
-	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
 	int s = ((lane & 1) ?
 		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
 		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
-	uint8_t l = intel_dp_link_status(link_status, i);
+	uint8_t l = adjust_request[lane>>1];
 
 	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
 }
 
 static uint8_t
-intel_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
 				      int lane)
 {
-	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
 	int s = ((lane & 1) ?
 		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
 		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
-	uint8_t l = intel_dp_link_status(link_status, i);
+	uint8_t l = adjust_request[lane>>1];
 
 	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
 }
@@ -1344,6 +1376,7 @@ static char *link_train_names[] = {
  * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
  */
 #define I830_DP_VOLTAGE_MAX	    DP_TRAIN_VOLTAGE_SWING_800
+#define I830_DP_VOLTAGE_MAX_CPT	    DP_TRAIN_VOLTAGE_SWING_1200
 
 static uint8_t
 intel_dp_pre_emphasis_max(uint8_t voltage_swing)
@@ -1362,15 +1395,18 @@ intel_dp_pre_emphasis_max(uint8_t voltage_swing)
 }
 
 static void
-intel_get_adjust_train(struct intel_dp *intel_dp)
+intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
 {
+	struct drm_device *dev = intel_dp->base.base.dev;
 	uint8_t v = 0;
 	uint8_t p = 0;
 	int lane;
+	uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
+	int voltage_max;
 
 	for (lane = 0; lane < intel_dp->lane_count; lane++) {
-		uint8_t this_v = intel_get_adjust_request_voltage(intel_dp->link_status, lane);
-		uint8_t this_p = intel_get_adjust_request_pre_emphasis(intel_dp->link_status, lane);
+		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
+		uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);
 
 		if (this_v > v)
 			v = this_v;
@@ -1378,8 +1414,12 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
 			p = this_p;
 	}
 
-	if (v >= I830_DP_VOLTAGE_MAX)
-		v = I830_DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+	if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
+		voltage_max = I830_DP_VOLTAGE_MAX_CPT;
+	else
+		voltage_max = I830_DP_VOLTAGE_MAX;
+	if (v >= voltage_max)
+		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;
 
 	if (p >= intel_dp_pre_emphasis_max(v))
 		p = intel_dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
@@ -1389,7 +1429,7 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
 }
 
 static uint32_t
-intel_dp_signal_levels(uint8_t train_set, int lane_count)
+intel_dp_signal_levels(uint8_t train_set)
 {
 	uint32_t signal_levels = 0;
 
@@ -1458,9 +1498,8 @@ static uint8_t
 intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
 		      int lane)
 {
-	int i = DP_LANE0_1_STATUS + (lane >> 1);
 	int s = (lane & 1) * 4;
-	uint8_t l = intel_dp_link_status(link_status, i);
+	uint8_t l = link_status[lane>>1];
 
 	return (l >> s) & 0xf;
 }
@@ -1485,18 +1524,18 @@ intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count
1485 DP_LANE_CHANNEL_EQ_DONE|\ 1524 DP_LANE_CHANNEL_EQ_DONE|\
1486 DP_LANE_SYMBOL_LOCKED) 1525 DP_LANE_SYMBOL_LOCKED)
1487static bool 1526static bool
1488intel_channel_eq_ok(struct intel_dp *intel_dp) 1527intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
1489{ 1528{
1490 uint8_t lane_align; 1529 uint8_t lane_align;
1491 uint8_t lane_status; 1530 uint8_t lane_status;
1492 int lane; 1531 int lane;
1493 1532
1494 lane_align = intel_dp_link_status(intel_dp->link_status, 1533 lane_align = intel_dp_link_status(link_status,
1495 DP_LANE_ALIGN_STATUS_UPDATED); 1534 DP_LANE_ALIGN_STATUS_UPDATED);
1496 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0) 1535 if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
1497 return false; 1536 return false;
1498 for (lane = 0; lane < intel_dp->lane_count; lane++) { 1537 for (lane = 0; lane < intel_dp->lane_count; lane++) {
1499 lane_status = intel_get_lane_status(intel_dp->link_status, lane); 1538 lane_status = intel_get_lane_status(link_status, lane);
1500 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS) 1539 if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
1501 return false; 1540 return false;
1502 } 1541 }
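
Both helpers above now operate on a caller-provided link_status snapshot rather than state cached in intel_dp. A self-contained sketch of the per-lane status nibble and the channel-EQ test it feeds (constants inlined from the DP spec; the interlane-align check is omitted):

    #include <stdbool.h>
    #include <stdint.h>

    #define DP_LANE_CR_DONE          (1 << 0)
    #define DP_LANE_CHANNEL_EQ_DONE  (1 << 1)
    #define DP_LANE_SYMBOL_LOCKED    (1 << 2)
    #define CHANNEL_EQ_BITS          (DP_LANE_CR_DONE | \
                                      DP_LANE_CHANNEL_EQ_DONE | \
                                      DP_LANE_SYMBOL_LOCKED)

    /* Two lanes per status byte, one nibble each. */
    static uint8_t lane_status(const uint8_t *link_status, int lane)
    {
            int s = (lane & 1) * 4;
            return (link_status[lane >> 1] >> s) & 0xf;
    }

    static bool channel_eq_ok(const uint8_t *link_status, int lane_count)
    {
            for (int lane = 0; lane < lane_count; lane++)
                    if ((lane_status(link_status, lane) & CHANNEL_EQ_BITS)
                        != CHANNEL_EQ_BITS)
                            return false;
            return true;
    }
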
@@ -1521,8 +1560,9 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
1521 1560
1522 ret = intel_dp_aux_native_write(intel_dp, 1561 ret = intel_dp_aux_native_write(intel_dp,
1523 DP_TRAINING_LANE0_SET, 1562 DP_TRAINING_LANE0_SET,
1524 intel_dp->train_set, 4); 1563 intel_dp->train_set,
1525 if (ret != 4) 1564 intel_dp->lane_count);
1565 if (ret != intel_dp->lane_count)
1526 return false; 1566 return false;
1527 1567
1528 return true; 1568 return true;
@@ -1538,7 +1578,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1538 int i; 1578 int i;
1539 uint8_t voltage; 1579 uint8_t voltage;
1540 bool clock_recovery = false; 1580 bool clock_recovery = false;
1541 int tries; 1581 int voltage_tries, loop_tries;
1542 u32 reg; 1582 u32 reg;
1543 uint32_t DP = intel_dp->DP; 1583 uint32_t DP = intel_dp->DP;
1544 1584
@@ -1565,16 +1605,20 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1565 DP &= ~DP_LINK_TRAIN_MASK; 1605 DP &= ~DP_LINK_TRAIN_MASK;
1566 memset(intel_dp->train_set, 0, 4); 1606 memset(intel_dp->train_set, 0, 4);
1567 voltage = 0xff; 1607 voltage = 0xff;
1568 tries = 0; 1608 voltage_tries = 0;
1609 loop_tries = 0;
1569 clock_recovery = false; 1610 clock_recovery = false;
1570 for (;;) { 1611 for (;;) {
1571 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1612 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1613 uint8_t link_status[DP_LINK_STATUS_SIZE];
1572 uint32_t signal_levels; 1614 uint32_t signal_levels;
1573 if (IS_GEN6(dev) && is_edp(intel_dp)) { 1615
1616 if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1574 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1617 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1575 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1618 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1576 } else { 1619 } else {
1577 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); 1620 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1621 DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
1578 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1622 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1579 } 1623 }
1580 1624
@@ -1590,10 +1634,13 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1590 /* Set training pattern 1 */ 1634 /* Set training pattern 1 */
1591 1635
1592 udelay(100); 1636 udelay(100);
1593 if (!intel_dp_get_link_status(intel_dp)) 1637 if (!intel_dp_get_link_status(intel_dp, link_status)) {
1638 DRM_ERROR("failed to get link status\n");
1594 break; 1639 break;
1640 }
1595 1641
1596 if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1642 if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1643 DRM_DEBUG_KMS("clock recovery OK\n");
1597 clock_recovery = true; 1644 clock_recovery = true;
1598 break; 1645 break;
1599 } 1646 }
@@ -1602,20 +1649,30 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
1602 for (i = 0; i < intel_dp->lane_count; i++) 1649 for (i = 0; i < intel_dp->lane_count; i++)
1603 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0) 1650 if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
1604 break; 1651 break;
1605 if (i == intel_dp->lane_count) 1652 if (i == intel_dp->lane_count) {
1606 break; 1653 ++loop_tries;
1654 if (loop_tries == 5) {
1655 DRM_DEBUG_KMS("too many full retries, give up\n");
1656 break;
1657 }
1658 memset(intel_dp->train_set, 0, 4);
1659 voltage_tries = 0;
1660 continue;
1661 }
1607 1662
1608 /* Check to see if we've tried the same voltage 5 times */ 1663 /* Check to see if we've tried the same voltage 5 times */
1609 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) { 1664 if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
1610 ++tries; 1665 ++voltage_tries;
1611 if (tries == 5) 1666 if (voltage_tries == 5) {
1667 DRM_DEBUG_KMS("too many voltage retries, give up\n");
1612 break; 1668 break;
1669 }
1613 } else 1670 } else
1614 tries = 0; 1671 voltage_tries = 0;
1615 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK; 1672 voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
1616 1673
1617 /* Compute new intel_dp->train_set as requested by target */ 1674 /* Compute new intel_dp->train_set as requested by target */
1618 intel_get_adjust_train(intel_dp); 1675 intel_get_adjust_train(intel_dp, link_status);
1619 } 1676 }
1620 1677
1621 intel_dp->DP = DP; 1678 intel_dp->DP = DP;
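
The reworked loop above separates two failure modes: five retries at one voltage level, and five full restarts once every lane reports max swing. A compilable skeleton of that retry policy, with the AUX traffic abstracted behind caller-supplied callbacks (both hypothetical stand-ins for the real DPCD reads and adjust-request handling):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    #define DP_TRAIN_VOLTAGE_SWING_MASK  0x3
    #define DP_TRAIN_MAX_SWING_REACHED   (1 << 2)

    static bool clock_recovery_loop(uint8_t train_set[4], int lane_count,
                                    bool (*clock_recovery_ok)(void),
                                    void (*next_train_set)(uint8_t *, int))
    {
            uint8_t voltage = 0xff;
            int voltage_tries = 0, loop_tries = 0;

            for (;;) {
                    int i;

                    if (clock_recovery_ok())
                            return true;

                    /* All lanes at max swing: restart from scratch,
                     * but only a bounded number of times. */
                    for (i = 0; i < lane_count; i++)
                            if (!(train_set[i] & DP_TRAIN_MAX_SWING_REACHED))
                                    break;
                    if (i == lane_count) {
                            if (++loop_tries == 5)
                                    return false;  /* too many full retries */
                            memset(train_set, 0, 4);
                            voltage_tries = 0;
                            continue;
                    }

                    /* Same voltage requested five times in a row: give up. */
                    if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
                            if (++voltage_tries == 5)
                                    return false;
                    } else {
                            voltage_tries = 0;
                    }
                    voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

                    next_train_set(train_set, lane_count);
            }
    }
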
@@ -1638,6 +1695,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1638 for (;;) { 1695 for (;;) {
1639 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */ 1696 /* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
1640 uint32_t signal_levels; 1697 uint32_t signal_levels;
1698 uint8_t link_status[DP_LINK_STATUS_SIZE];
1641 1699
1642 if (cr_tries > 5) { 1700 if (cr_tries > 5) {
1643 DRM_ERROR("failed to train DP, aborting\n"); 1701 DRM_ERROR("failed to train DP, aborting\n");
@@ -1645,11 +1703,11 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1645 break; 1703 break;
1646 } 1704 }
1647 1705
1648 if (IS_GEN6(dev) && is_edp(intel_dp)) { 1706 if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
1649 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]); 1707 signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
1650 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels; 1708 DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
1651 } else { 1709 } else {
1652 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count); 1710 signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
1653 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels; 1711 DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
1654 } 1712 }
1655 1713
@@ -1665,17 +1723,17 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1665 break; 1723 break;
1666 1724
1667 udelay(400); 1725 udelay(400);
1668 if (!intel_dp_get_link_status(intel_dp)) 1726 if (!intel_dp_get_link_status(intel_dp, link_status))
1669 break; 1727 break;
1670 1728
1671 /* Make sure clock is still ok */ 1729 /* Make sure clock is still ok */
1672 if (!intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) { 1730 if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
1673 intel_dp_start_link_train(intel_dp); 1731 intel_dp_start_link_train(intel_dp);
1674 cr_tries++; 1732 cr_tries++;
1675 continue; 1733 continue;
1676 } 1734 }
1677 1735
1678 if (intel_channel_eq_ok(intel_dp)) { 1736 if (intel_channel_eq_ok(intel_dp, link_status)) {
1679 channel_eq = true; 1737 channel_eq = true;
1680 break; 1738 break;
1681 } 1739 }
@@ -1690,7 +1748,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
1690 } 1748 }
1691 1749
1692 /* Compute new intel_dp->train_set as requested by target */ 1750 /* Compute new intel_dp->train_set as requested by target */
1693 intel_get_adjust_train(intel_dp); 1751 intel_get_adjust_train(intel_dp, link_status);
1694 ++tries; 1752 ++tries;
1695 } 1753 }
1696 1754
@@ -1735,8 +1793,12 @@ intel_dp_link_down(struct intel_dp *intel_dp)
1735 1793
1736 msleep(17); 1794 msleep(17);
1737 1795
1738 if (is_edp(intel_dp)) 1796 if (is_edp(intel_dp)) {
1739 DP |= DP_LINK_TRAIN_OFF; 1797 if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
1798 DP |= DP_LINK_TRAIN_OFF_CPT;
1799 else
1800 DP |= DP_LINK_TRAIN_OFF;
1801 }
1740 1802
1741 if (!HAS_PCH_CPT(dev) && 1803 if (!HAS_PCH_CPT(dev) &&
1742 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) { 1804 I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
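
CPT PCH ports encode the training pattern in a different bitfield than the CPU/ILK DP port registers, which is why link-down now has to pick between two OFF values. A sketch of just that selection (register bit values assumed from i915_reg.h):

    #include <stdbool.h>
    #include <stdint.h>

    #define DP_LINK_TRAIN_OFF      (3u << 28)  /* GMCH/ILK DP port field */
    #define DP_LINK_TRAIN_OFF_CPT  (3u << 8)   /* CPT moved it to bits 9:8 */

    static uint32_t link_train_off_bits(bool has_pch_cpt, bool cpu_edp)
    {
            return (has_pch_cpt && !cpu_edp) ? DP_LINK_TRAIN_OFF_CPT
                                             : DP_LINK_TRAIN_OFF;
    }
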
@@ -1822,6 +1884,7 @@ static void
1822intel_dp_check_link_status(struct intel_dp *intel_dp) 1884intel_dp_check_link_status(struct intel_dp *intel_dp)
1823{ 1885{
1824 u8 sink_irq_vector; 1886 u8 sink_irq_vector;
1887 u8 link_status[DP_LINK_STATUS_SIZE];
1825 1888
1826 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON) 1889 if (intel_dp->dpms_mode != DRM_MODE_DPMS_ON)
1827 return; 1890 return;
@@ -1830,7 +1893,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
1830 return; 1893 return;
1831 1894
1832 /* Try to read receiver status if the link appears to be up */ 1895 /* Try to read receiver status if the link appears to be up */
1833 if (!intel_dp_get_link_status(intel_dp)) { 1896 if (!intel_dp_get_link_status(intel_dp, link_status)) {
1834 intel_dp_link_down(intel_dp); 1897 intel_dp_link_down(intel_dp);
1835 return; 1898 return;
1836 } 1899 }
@@ -1855,7 +1918,7 @@ intel_dp_check_link_status(struct intel_dp *intel_dp)
1855 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n"); 1918 DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
1856 } 1919 }
1857 1920
1858 if (!intel_channel_eq_ok(intel_dp)) { 1921 if (!intel_channel_eq_ok(intel_dp, link_status)) {
1859 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n", 1922 DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
1860 drm_get_encoder_name(&intel_dp->base.base)); 1923 drm_get_encoder_name(&intel_dp->base.base));
1861 intel_dp_start_link_train(intel_dp); 1924 intel_dp_start_link_train(intel_dp);
@@ -2179,7 +2242,8 @@ intel_trans_dp_port_sel(struct drm_crtc *crtc)
2179 continue; 2242 continue;
2180 2243
2181 intel_dp = enc_to_intel_dp(encoder); 2244 intel_dp = enc_to_intel_dp(encoder);
2182 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT) 2245 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT ||
2246 intel_dp->base.type == INTEL_OUTPUT_EDP)
2183 return intel_dp->output_reg; 2247 return intel_dp->output_reg;
2184 } 2248 }
2185 2249
@@ -2321,7 +2385,7 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2321 2385
2322 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2386 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >>
2323 PANEL_LIGHT_ON_DELAY_SHIFT; 2387 PANEL_LIGHT_ON_DELAY_SHIFT;
2324 2388
2325 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2389 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >>
2326 PANEL_LIGHT_OFF_DELAY_SHIFT; 2390 PANEL_LIGHT_OFF_DELAY_SHIFT;
2327 2391
@@ -2354,11 +2418,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2354 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2418 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
2355 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2419 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
2356 2420
2357 intel_dp->panel_off_jiffies = jiffies - intel_dp->panel_power_down_delay;
2358
2359 ironlake_edp_panel_vdd_on(intel_dp); 2421 ironlake_edp_panel_vdd_on(intel_dp);
2360 ret = intel_dp_get_dpcd(intel_dp); 2422 ret = intel_dp_get_dpcd(intel_dp);
2361 ironlake_edp_panel_vdd_off(intel_dp, false); 2423 ironlake_edp_panel_vdd_off(intel_dp, false);
2424
2362 if (ret) { 2425 if (ret) {
2363 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2426 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11)
2364 dev_priv->no_aux_handshake = 2427 dev_priv->no_aux_handshake =
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 499d4c0dbeeb..21f60b7d69a3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -326,7 +326,8 @@ static int intel_panel_update_status(struct backlight_device *bd)
326static int intel_panel_get_brightness(struct backlight_device *bd) 326static int intel_panel_get_brightness(struct backlight_device *bd)
327{ 327{
328 struct drm_device *dev = bl_get_data(bd); 328 struct drm_device *dev = bl_get_data(bd);
329 return intel_panel_get_backlight(dev); 329 struct drm_i915_private *dev_priv = dev->dev_private;
330 return dev_priv->backlight_level;
330} 331}
331 332
332static const struct backlight_ops intel_panel_bl_ops = { 333static const struct backlight_ops intel_panel_bl_ops = {
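
The getter now returns the level cached in dev_priv instead of calling intel_panel_get_backlight(), so a read while the panel is powered down no longer reports 0. Reduced to its essence (structure and field names are stand-ins):

    /* Report the last level software wrote, not a fresh register read
     * that would come back as 0 while the backlight is off. */
    struct panel_state {
            unsigned int backlight_level;  /* updated on every set */
    };

    static int get_brightness(const struct panel_state *p)
    {
            return p->backlight_level;
    }
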
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 7fdfa8ea7570..38e1bda73d33 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -480,21 +480,23 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
480 } 480 }
481 break; 481 break;
482 case DB_Z_INFO: 482 case DB_Z_INFO:
483 r = evergreen_cs_packet_next_reloc(p, &reloc);
484 if (r) {
485 dev_warn(p->dev, "bad SET_CONTEXT_REG "
486 "0x%04X\n", reg);
487 return -EINVAL;
488 }
489 track->db_z_info = radeon_get_ib_value(p, idx); 483 track->db_z_info = radeon_get_ib_value(p, idx);
490 ib[idx] &= ~Z_ARRAY_MODE(0xf); 484 if (!p->keep_tiling_flags) {
491 track->db_z_info &= ~Z_ARRAY_MODE(0xf); 485 r = evergreen_cs_packet_next_reloc(p, &reloc);
492 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 486 if (r) {
493 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 487 dev_warn(p->dev, "bad SET_CONTEXT_REG "
494 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 488 "0x%04X\n", reg);
495 } else { 489 return -EINVAL;
496 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 490 }
497 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 491 ib[idx] &= ~Z_ARRAY_MODE(0xf);
492 track->db_z_info &= ~Z_ARRAY_MODE(0xf);
493 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
494 ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
495 track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
496 } else {
497 ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
498 track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
499 }
498 } 500 }
499 break; 501 break;
500 case DB_STENCIL_INFO: 502 case DB_STENCIL_INFO:
@@ -607,40 +609,44 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
607 case CB_COLOR5_INFO: 609 case CB_COLOR5_INFO:
608 case CB_COLOR6_INFO: 610 case CB_COLOR6_INFO:
609 case CB_COLOR7_INFO: 611 case CB_COLOR7_INFO:
610 r = evergreen_cs_packet_next_reloc(p, &reloc);
611 if (r) {
612 dev_warn(p->dev, "bad SET_CONTEXT_REG "
613 "0x%04X\n", reg);
614 return -EINVAL;
615 }
616 tmp = (reg - CB_COLOR0_INFO) / 0x3c; 612 tmp = (reg - CB_COLOR0_INFO) / 0x3c;
617 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 613 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
618 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 614 if (!p->keep_tiling_flags) {
619 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 615 r = evergreen_cs_packet_next_reloc(p, &reloc);
620 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 616 if (r) {
621 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 617 dev_warn(p->dev, "bad SET_CONTEXT_REG "
622 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 618 "0x%04X\n", reg);
623 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 619 return -EINVAL;
620 }
621 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
622 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
623 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
624 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
625 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
626 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
627 }
624 } 628 }
625 break; 629 break;
626 case CB_COLOR8_INFO: 630 case CB_COLOR8_INFO:
627 case CB_COLOR9_INFO: 631 case CB_COLOR9_INFO:
628 case CB_COLOR10_INFO: 632 case CB_COLOR10_INFO:
629 case CB_COLOR11_INFO: 633 case CB_COLOR11_INFO:
630 r = evergreen_cs_packet_next_reloc(p, &reloc);
631 if (r) {
632 dev_warn(p->dev, "bad SET_CONTEXT_REG "
633 "0x%04X\n", reg);
634 return -EINVAL;
635 }
636 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8; 634 tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
637 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx); 635 track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
638 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) { 636 if (!p->keep_tiling_flags) {
639 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 637 r = evergreen_cs_packet_next_reloc(p, &reloc);
640 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 638 if (r) {
641 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) { 639 dev_warn(p->dev, "bad SET_CONTEXT_REG "
642 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 640 "0x%04X\n", reg);
643 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 641 return -EINVAL;
642 }
643 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
644 ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
645 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
646 } else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
647 ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
648 track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
649 }
644 } 650 }
645 break; 651 break;
646 case CB_COLOR0_PITCH: 652 case CB_COLOR0_PITCH:
@@ -1311,10 +1317,12 @@ static int evergreen_packet3_check(struct radeon_cs_parser *p,
1311 return -EINVAL; 1317 return -EINVAL;
1312 } 1318 }
1313 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1319 ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1314 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1320 if (!p->keep_tiling_flags) {
1315 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1); 1321 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1316 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1322 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
1317 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1); 1323 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1324 ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
1325 }
1318 texture = reloc->robj; 1326 texture = reloc->robj;
1319 /* tex mip base */ 1327 /* tex mip base */
1320 r = evergreen_cs_packet_next_reloc(p, &reloc); 1328 r = evergreen_cs_packet_next_reloc(p, &reloc);
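
All three evergreen hunks apply the same transformation: when the CS carries RADEON_CS_KEEP_TILING_FLAGS, skip both the relocation lookup and the array-mode rewrite and trust the IB as submitted. Distilled into one pure function over a single register word (macro values copied from evergreend.h and radeon_drm.h as assumptions):

    #include <stdbool.h>
    #include <stdint.h>

    #define RADEON_TILING_MACRO   0x1
    #define ARRAY_1D_TILED_THIN1  2
    #define ARRAY_2D_TILED_THIN1  4
    #define Z_ARRAY_MODE(x)       ((x) << 4)

    static uint32_t fixup_db_z_info(uint32_t word, uint32_t tiling_flags,
                                    bool keep_tiling_flags)
    {
            /* Userspace manages tiling itself: leave the word alone. */
            if (keep_tiling_flags)
                    return word;

            /* Otherwise derive the array mode from the BO's tiling. */
            word &= ~Z_ARRAY_MODE(0xf);
            if (tiling_flags & RADEON_TILING_MACRO)
                    word |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
            else
                    word |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
            return word;
    }
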
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 400b26df652a..c93bc64707e1 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -701,16 +701,21 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
701 return r; 701 return r;
702 } 702 }
703 703
704 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 704 if (p->keep_tiling_flags) {
705 tile_flags |= R300_TXO_MACRO_TILE; 705 ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */
706 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 706 ((idx_value & ~31) + (u32)reloc->lobj.gpu_offset);
707 tile_flags |= R300_TXO_MICRO_TILE; 707 } else {
708 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 708 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
709 tile_flags |= R300_TXO_MICRO_TILE_SQUARE; 709 tile_flags |= R300_TXO_MACRO_TILE;
710 710 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
711 tmp = idx_value + ((u32)reloc->lobj.gpu_offset); 711 tile_flags |= R300_TXO_MICRO_TILE;
712 tmp |= tile_flags; 712 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
713 ib[idx] = tmp; 713 tile_flags |= R300_TXO_MICRO_TILE_SQUARE;
714
715 tmp = idx_value + ((u32)reloc->lobj.gpu_offset);
716 tmp |= tile_flags;
717 ib[idx] = tmp;
718 }
714 track->textures[i].robj = reloc->robj; 719 track->textures[i].robj = reloc->robj;
715 track->tex_dirty = true; 720 track->tex_dirty = true;
716 break; 721 break;
@@ -760,24 +765,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
760 /* RB3D_COLORPITCH1 */ 765 /* RB3D_COLORPITCH1 */
761 /* RB3D_COLORPITCH2 */ 766 /* RB3D_COLORPITCH2 */
762 /* RB3D_COLORPITCH3 */ 767 /* RB3D_COLORPITCH3 */
763 r = r100_cs_packet_next_reloc(p, &reloc); 768 if (!p->keep_tiling_flags) {
764 if (r) { 769 r = r100_cs_packet_next_reloc(p, &reloc);
765 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 770 if (r) {
766 idx, reg); 771 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
767 r100_cs_dump_packet(p, pkt); 772 idx, reg);
768 return r; 773 r100_cs_dump_packet(p, pkt);
769 } 774 return r;
775 }
770 776
771 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 777 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
772 tile_flags |= R300_COLOR_TILE_ENABLE; 778 tile_flags |= R300_COLOR_TILE_ENABLE;
773 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 779 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
774 tile_flags |= R300_COLOR_MICROTILE_ENABLE; 780 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
775 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE) 781 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
776 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; 782 tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE;
777 783
778 tmp = idx_value & ~(0x7 << 16); 784 tmp = idx_value & ~(0x7 << 16);
779 tmp |= tile_flags; 785 tmp |= tile_flags;
780 ib[idx] = tmp; 786 ib[idx] = tmp;
787 }
781 i = (reg - 0x4E38) >> 2; 788 i = (reg - 0x4E38) >> 2;
782 track->cb[i].pitch = idx_value & 0x3FFE; 789 track->cb[i].pitch = idx_value & 0x3FFE;
783 switch (((idx_value >> 21) & 0xF)) { 790 switch (((idx_value >> 21) & 0xF)) {
@@ -843,25 +850,26 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
843 break; 850 break;
844 case 0x4F24: 851 case 0x4F24:
845 /* ZB_DEPTHPITCH */ 852 /* ZB_DEPTHPITCH */
846 r = r100_cs_packet_next_reloc(p, &reloc); 853 if (!p->keep_tiling_flags) {
847 if (r) { 854 r = r100_cs_packet_next_reloc(p, &reloc);
848 DRM_ERROR("No reloc for ib[%d]=0x%04X\n", 855 if (r) {
849 idx, reg); 856 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
850 r100_cs_dump_packet(p, pkt); 857 idx, reg);
851 return r; 858 r100_cs_dump_packet(p, pkt);
852 } 859 return r;
853 860 }
854 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
855 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
856 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
857 tile_flags |= R300_DEPTHMICROTILE_TILED;
858 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
859 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
860 861
861 tmp = idx_value & ~(0x7 << 16); 862 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
862 tmp |= tile_flags; 863 tile_flags |= R300_DEPTHMACROTILE_ENABLE;
863 ib[idx] = tmp; 864 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
865 tile_flags |= R300_DEPTHMICROTILE_TILED;
866 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO_SQUARE)
867 tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE;
864 868
869 tmp = idx_value & ~(0x7 << 16);
870 tmp |= tile_flags;
871 ib[idx] = tmp;
872 }
865 track->zb.pitch = idx_value & 0x3FFC; 873 track->zb.pitch = idx_value & 0x3FFC;
866 track->zb_dirty = true; 874 track->zb_dirty = true;
867 break; 875 break;
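
The new keep_tiling_flags branch for TX_OFFSET still has to relocate the address while preserving the five low flag bits. The arithmetic in isolation:

    #include <stdint.h>

    /* R300 TX_OFFSET packs flags in the low 5 bits and a 32-byte-aligned
     * address above them: relocate the address, keep the flags. */
    static uint32_t relocate_tx_offset(uint32_t idx_value, uint32_t gpu_offset)
    {
            return (idx_value & 31) |              /* keep the 1st 5 bits */
                   ((idx_value & ~31u) + gpu_offset);
    }
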
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0a2e023c1557..cb1acffd2430 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -941,7 +941,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
941 track->db_depth_control = radeon_get_ib_value(p, idx); 941 track->db_depth_control = radeon_get_ib_value(p, idx);
942 break; 942 break;
943 case R_028010_DB_DEPTH_INFO: 943 case R_028010_DB_DEPTH_INFO:
944 if (r600_cs_packet_next_is_pkt3_nop(p)) { 944 if (!p->keep_tiling_flags &&
945 r600_cs_packet_next_is_pkt3_nop(p)) {
945 r = r600_cs_packet_next_reloc(p, &reloc); 946 r = r600_cs_packet_next_reloc(p, &reloc);
946 if (r) { 947 if (r) {
947 dev_warn(p->dev, "bad SET_CONTEXT_REG " 948 dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -992,7 +993,8 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
992 case R_0280B4_CB_COLOR5_INFO: 993 case R_0280B4_CB_COLOR5_INFO:
993 case R_0280B8_CB_COLOR6_INFO: 994 case R_0280B8_CB_COLOR6_INFO:
994 case R_0280BC_CB_COLOR7_INFO: 995 case R_0280BC_CB_COLOR7_INFO:
995 if (r600_cs_packet_next_is_pkt3_nop(p)) { 996 if (!p->keep_tiling_flags &&
997 r600_cs_packet_next_is_pkt3_nop(p)) {
996 r = r600_cs_packet_next_reloc(p, &reloc); 998 r = r600_cs_packet_next_reloc(p, &reloc);
997 if (r) { 999 if (r) {
998 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg); 1000 dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
@@ -1291,10 +1293,12 @@ static int r600_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
1291 mip_offset <<= 8; 1293 mip_offset <<= 8;
1292 1294
1293 word0 = radeon_get_ib_value(p, idx + 0); 1295 word0 = radeon_get_ib_value(p, idx + 0);
1294 if (tiling_flags & RADEON_TILING_MACRO) 1296 if (!p->keep_tiling_flags) {
1295 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1297 if (tiling_flags & RADEON_TILING_MACRO)
1296 else if (tiling_flags & RADEON_TILING_MICRO) 1298 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1297 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1299 else if (tiling_flags & RADEON_TILING_MICRO)
1300 word0 |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1301 }
1298 word1 = radeon_get_ib_value(p, idx + 1); 1302 word1 = radeon_get_ib_value(p, idx + 1);
1299 w0 = G_038000_TEX_WIDTH(word0) + 1; 1303 w0 = G_038000_TEX_WIDTH(word0) + 1;
1300 h0 = G_038004_TEX_HEIGHT(word1) + 1; 1304 h0 = G_038004_TEX_HEIGHT(word1) + 1;
@@ -1621,10 +1625,12 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
1621 return -EINVAL; 1625 return -EINVAL;
1622 } 1626 }
1623 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff); 1627 base_offset = (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
1624 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 1628 if (!p->keep_tiling_flags) {
1625 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1); 1629 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
1626 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1630 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_2D_TILED_THIN1);
1627 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1); 1631 else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1632 ib[idx+1+(i*7)+0] |= S_038000_TILE_MODE(V_038000_ARRAY_1D_TILED_THIN1);
1633 }
1628 texture = reloc->robj; 1634 texture = reloc->robj;
1629 /* tex mip base */ 1635 /* tex mip base */
1630 r = r600_cs_packet_next_reloc(p, &reloc); 1636 r = r600_cs_packet_next_reloc(p, &reloc);
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index fc5a1d642cb5..8227e76b5c70 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -611,7 +611,8 @@ struct radeon_cs_parser {
611 struct radeon_ib *ib; 611 struct radeon_ib *ib;
612 void *track; 612 void *track;
613 unsigned family; 613 unsigned family;
614 int parser_error; 614 int parser_error;
615 bool keep_tiling_flags;
615}; 616};
616 617
617extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx); 618extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d2d179267af3..d24baf30efcb 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -62,6 +62,87 @@ union atom_supported_devices {
62 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; 62 struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
63}; 63};
64 64
65static void radeon_lookup_i2c_gpio_quirks(struct radeon_device *rdev,
66 ATOM_GPIO_I2C_ASSIGMENT *gpio,
67 u8 index)
68{
69 /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */
70 if ((rdev->family == CHIP_R420) ||
71 (rdev->family == CHIP_R423) ||
72 (rdev->family == CHIP_RV410)) {
73 if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) ||
74 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) ||
75 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) {
76 gpio->ucClkMaskShift = 0x19;
77 gpio->ucDataMaskShift = 0x18;
78 }
79 }
80
81 /* some evergreen boards have bad data for this entry */
82 if (ASIC_IS_DCE4(rdev)) {
83 if ((index == 7) &&
84 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
85 (gpio->sucI2cId.ucAccess == 0)) {
86 gpio->sucI2cId.ucAccess = 0x97;
87 gpio->ucDataMaskShift = 8;
88 gpio->ucDataEnShift = 8;
89 gpio->ucDataY_Shift = 8;
90 gpio->ucDataA_Shift = 8;
91 }
92 }
93
94 /* some DCE3 boards have bad data for this entry */
95 if (ASIC_IS_DCE3(rdev)) {
96 if ((index == 4) &&
97 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
98 (gpio->sucI2cId.ucAccess == 0x94))
99 gpio->sucI2cId.ucAccess = 0x14;
100 }
101}
102
103static struct radeon_i2c_bus_rec radeon_get_bus_rec_for_i2c_gpio(ATOM_GPIO_I2C_ASSIGMENT *gpio)
104{
105 struct radeon_i2c_bus_rec i2c;
106
107 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
108
109 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
110 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
111 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
112 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
113 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
114 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
115 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
116 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
117 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
118 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
119 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
120 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
121 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
122 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
123 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
124 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
125
126 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
127 i2c.hw_capable = true;
128 else
129 i2c.hw_capable = false;
130
131 if (gpio->sucI2cId.ucAccess == 0xa0)
132 i2c.mm_i2c = true;
133 else
134 i2c.mm_i2c = false;
135
136 i2c.i2c_id = gpio->sucI2cId.ucAccess;
137
138 if (i2c.mask_clk_reg)
139 i2c.valid = true;
140 else
141 i2c.valid = false;
142
143 return i2c;
144}
145
65static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, 146static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
66 uint8_t id) 147 uint8_t id)
67{ 148{
@@ -85,59 +166,10 @@ static struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rd
85 for (i = 0; i < num_indices; i++) { 166 for (i = 0; i < num_indices; i++) {
86 gpio = &i2c_info->asGPIO_Info[i]; 167 gpio = &i2c_info->asGPIO_Info[i];
87 168
88 /* some evergreen boards have bad data for this entry */ 169 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
89 if (ASIC_IS_DCE4(rdev)) {
90 if ((i == 7) &&
91 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
92 (gpio->sucI2cId.ucAccess == 0)) {
93 gpio->sucI2cId.ucAccess = 0x97;
94 gpio->ucDataMaskShift = 8;
95 gpio->ucDataEnShift = 8;
96 gpio->ucDataY_Shift = 8;
97 gpio->ucDataA_Shift = 8;
98 }
99 }
100
101 /* some DCE3 boards have bad data for this entry */
102 if (ASIC_IS_DCE3(rdev)) {
103 if ((i == 4) &&
104 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
105 (gpio->sucI2cId.ucAccess == 0x94))
106 gpio->sucI2cId.ucAccess = 0x14;
107 }
108 170
109 if (gpio->sucI2cId.ucAccess == id) { 171 if (gpio->sucI2cId.ucAccess == id) {
110 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 172 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
111 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
112 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
113 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
114 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
115 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
116 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
117 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
118 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
119 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
120 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
121 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
122 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
123 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
124 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
125 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
126
127 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
128 i2c.hw_capable = true;
129 else
130 i2c.hw_capable = false;
131
132 if (gpio->sucI2cId.ucAccess == 0xa0)
133 i2c.mm_i2c = true;
134 else
135 i2c.mm_i2c = false;
136
137 i2c.i2c_id = gpio->sucI2cId.ucAccess;
138
139 if (i2c.mask_clk_reg)
140 i2c.valid = true;
141 break; 173 break;
142 } 174 }
143 } 175 }
@@ -157,8 +189,6 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
157 int i, num_indices; 189 int i, num_indices;
158 char stmp[32]; 190 char stmp[32];
159 191
160 memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
161
162 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { 192 if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
163 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); 193 i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
164 194
@@ -167,60 +197,12 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
167 197
168 for (i = 0; i < num_indices; i++) { 198 for (i = 0; i < num_indices; i++) {
169 gpio = &i2c_info->asGPIO_Info[i]; 199 gpio = &i2c_info->asGPIO_Info[i];
170 i2c.valid = false;
171
172 /* some evergreen boards have bad data for this entry */
173 if (ASIC_IS_DCE4(rdev)) {
174 if ((i == 7) &&
175 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) &&
176 (gpio->sucI2cId.ucAccess == 0)) {
177 gpio->sucI2cId.ucAccess = 0x97;
178 gpio->ucDataMaskShift = 8;
179 gpio->ucDataEnShift = 8;
180 gpio->ucDataY_Shift = 8;
181 gpio->ucDataA_Shift = 8;
182 }
183 }
184
185 /* some DCE3 boards have bad data for this entry */
186 if (ASIC_IS_DCE3(rdev)) {
187 if ((i == 4) &&
188 (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) &&
189 (gpio->sucI2cId.ucAccess == 0x94))
190 gpio->sucI2cId.ucAccess = 0x14;
191 }
192 200
193 i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; 201 radeon_lookup_i2c_gpio_quirks(rdev, gpio, i);
194 i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
195 i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
196 i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
197 i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
198 i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
199 i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
200 i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
201 i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
202 i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
203 i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
204 i2c.en_data_mask = (1 << gpio->ucDataEnShift);
205 i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
206 i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
207 i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
208 i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
209
210 if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
211 i2c.hw_capable = true;
212 else
213 i2c.hw_capable = false;
214 202
215 if (gpio->sucI2cId.ucAccess == 0xa0) 203 i2c = radeon_get_bus_rec_for_i2c_gpio(gpio);
216 i2c.mm_i2c = true;
217 else
218 i2c.mm_i2c = false;
219 204
220 i2c.i2c_id = gpio->sucI2cId.ucAccess; 205 if (i2c.valid) {
221
222 if (i2c.mask_clk_reg) {
223 i2c.valid = true;
224 sprintf(stmp, "0x%x", i2c.i2c_id); 206 sprintf(stmp, "0x%x", i2c.i2c_id);
225 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); 207 rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp);
226 } 208 }
@@ -1996,14 +1978,14 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
1996 return state_index; 1978 return state_index;
1997 /* last mode is usually default, array is low to high */ 1979 /* last mode is usually default, array is low to high */
1998 for (i = 0; i < num_modes; i++) { 1980 for (i = 0; i < num_modes; i++) {
1981 rdev->pm.power_state[state_index].clock_info =
1982 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
1983 if (!rdev->pm.power_state[state_index].clock_info)
1984 return state_index;
1985 rdev->pm.power_state[state_index].num_clock_modes = 1;
1999 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; 1986 rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
2000 switch (frev) { 1987 switch (frev) {
2001 case 1: 1988 case 1:
2002 rdev->pm.power_state[state_index].clock_info =
2003 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2004 if (!rdev->pm.power_state[state_index].clock_info)
2005 return state_index;
2006 rdev->pm.power_state[state_index].num_clock_modes = 1;
2007 rdev->pm.power_state[state_index].clock_info[0].mclk = 1989 rdev->pm.power_state[state_index].clock_info[0].mclk =
2008 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock); 1990 le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
2009 rdev->pm.power_state[state_index].clock_info[0].sclk = 1991 rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2039,11 +2021,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2039 state_index++; 2021 state_index++;
2040 break; 2022 break;
2041 case 2: 2023 case 2:
2042 rdev->pm.power_state[state_index].clock_info =
2043 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2044 if (!rdev->pm.power_state[state_index].clock_info)
2045 return state_index;
2046 rdev->pm.power_state[state_index].num_clock_modes = 1;
2047 rdev->pm.power_state[state_index].clock_info[0].mclk = 2024 rdev->pm.power_state[state_index].clock_info[0].mclk =
2048 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock); 2025 le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
2049 rdev->pm.power_state[state_index].clock_info[0].sclk = 2026 rdev->pm.power_state[state_index].clock_info[0].sclk =
@@ -2080,11 +2057,6 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
2080 state_index++; 2057 state_index++;
2081 break; 2058 break;
2082 case 3: 2059 case 3:
2083 rdev->pm.power_state[state_index].clock_info =
2084 kzalloc(sizeof(struct radeon_pm_clock_info) * 1, GFP_KERNEL);
2085 if (!rdev->pm.power_state[state_index].clock_info)
2086 return state_index;
2087 rdev->pm.power_state[state_index].num_clock_modes = 1;
2088 rdev->pm.power_state[state_index].clock_info[0].mclk = 2060 rdev->pm.power_state[state_index].clock_info[0].mclk =
2089 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock); 2061 le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
2090 rdev->pm.power_state[state_index].clock_info[0].sclk = 2062 rdev->pm.power_state[state_index].clock_info[0].sclk =
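
The three deleted per-case allocations were identical, so the hunk hoists a single allocation ahead of the frev switch; each revision case now only fills in clocks and voltage. A sketch of the allocate-once pattern with placeholder types:

    #include <stdlib.h>

    struct clock_info { unsigned mclk, sclk; };
    struct power_state {
            struct clock_info *clock_info;
            int num_clock_modes;
    };

    /* Allocate the single clock mode up front (zeroed), then let the
     * revision-specific parser fill in the clocks. */
    static int init_power_state(struct power_state *ps)
    {
            ps->clock_info = calloc(1, sizeof(*ps->clock_info));
            if (!ps->clock_info)
                    return -1;
            ps->num_clock_modes = 1;
            return 0;
    }
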
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index ccaa243c1442..29afd71e0840 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -93,7 +93,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
93{ 93{
94 struct drm_radeon_cs *cs = data; 94 struct drm_radeon_cs *cs = data;
95 uint64_t *chunk_array_ptr; 95 uint64_t *chunk_array_ptr;
96 unsigned size, i; 96 unsigned size, i, flags = 0;
97 97
98 if (!cs->num_chunks) { 98 if (!cs->num_chunks) {
99 return 0; 99 return 0;
@@ -140,6 +140,10 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
140 if (p->chunks[i].length_dw == 0) 140 if (p->chunks[i].length_dw == 0)
141 return -EINVAL; 141 return -EINVAL;
142 } 142 }
143 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS &&
144 !p->chunks[i].length_dw) {
145 return -EINVAL;
146 }
143 147
144 p->chunks[i].length_dw = user_chunk.length_dw; 148 p->chunks[i].length_dw = user_chunk.length_dw;
145 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data; 149 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
@@ -155,6 +159,9 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
155 p->chunks[i].user_ptr, size)) { 159 p->chunks[i].user_ptr, size)) {
156 return -EFAULT; 160 return -EFAULT;
157 } 161 }
162 if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
163 flags = p->chunks[i].kdata[0];
164 }
158 } else { 165 } else {
159 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL); 166 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
160 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL); 167 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
@@ -174,6 +181,8 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
174 p->chunks[p->chunk_ib_idx].length_dw); 181 p->chunks[p->chunk_ib_idx].length_dw);
175 return -EINVAL; 182 return -EINVAL;
176 } 183 }
184
185 p->keep_tiling_flags = (flags & RADEON_CS_KEEP_TILING_FLAGS) != 0;
177 return 0; 186 return 0;
178} 187}
179 188
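
On the userspace side, the new behavior is requested through a one-dword RADEON_CHUNK_ID_FLAGS chunk in the CS ioctl. A hedged sketch of building such a chunk (struct layout simplified from drm_radeon_cs_chunk; note the parser above rejects a flags chunk with length_dw == 0):

    #include <stdint.h>

    #define RADEON_CHUNK_ID_FLAGS        0x03
    #define RADEON_CS_KEEP_TILING_FLAGS  0x01

    struct chunk { uint32_t chunk_id, length_dw; uint64_t chunk_data; };

    /* Hypothetical helper: point a flags chunk at one dword of storage
     * asking the kernel to leave userspace-managed tiling bits alone. */
    static struct chunk make_flags_chunk(const uint32_t *flags_storage)
    {
            struct chunk c = {
                    .chunk_id   = RADEON_CHUNK_ID_FLAGS,
                    .length_dw  = 1,   /* must be non-zero */
                    .chunk_data = (uint64_t)(uintptr_t)flags_storage,
            };
            return c;
    }
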
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index a0b35e909489..71499fc3daf5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -53,9 +53,10 @@
53 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query 53 * 2.9.0 - r600 tiling (s3tc,rgtc) working, SET_PREDICATION packet 3 on r600 + eg, backend query
54 * 2.10.0 - fusion 2D tiling 54 * 2.10.0 - fusion 2D tiling
55 * 2.11.0 - backend map, initial compute support for the CS checker 55 * 2.11.0 - backend map, initial compute support for the CS checker
56 * 2.12.0 - RADEON_CS_KEEP_TILING_FLAGS
56 */ 57 */
57#define KMS_DRIVER_MAJOR 2 58#define KMS_DRIVER_MAJOR 2
58#define KMS_DRIVER_MINOR 11 59#define KMS_DRIVER_MINOR 12
59#define KMS_DRIVER_PATCHLEVEL 0 60#define KMS_DRIVER_PATCHLEVEL 0
60int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 61int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
61int radeon_driver_unload_kms(struct drm_device *dev); 62int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 617b64678fc6..0bb0f5f713e6 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -574,10 +574,16 @@ retry:
574 return ret; 574 return ret;
575 575
576 spin_lock(&glob->lru_lock); 576 spin_lock(&glob->lru_lock);
577
578 if (unlikely(list_empty(&bo->ddestroy))) {
579 spin_unlock(&glob->lru_lock);
580 return 0;
581 }
582
577 ret = ttm_bo_reserve_locked(bo, interruptible, 583 ret = ttm_bo_reserve_locked(bo, interruptible,
578 no_wait_reserve, false, 0); 584 no_wait_reserve, false, 0);
579 585
580 if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) { 586 if (unlikely(ret != 0)) {
581 spin_unlock(&glob->lru_lock); 587 spin_unlock(&glob->lru_lock);
582 return ret; 588 return ret;
583 } 589 }
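
The reordering moves the "already destroyed?" test ahead of the reserve, so a BO that a racing thread has removed from ddestroy is treated as success rather than being re-reserved. The control flow in miniature (locking elided, callback hypothetical):

    #include <stdbool.h>

    static int cleanup_refs_sketch(bool still_on_ddestroy,
                                   int (*reserve_locked)(void))
    {
            if (!still_on_ddestroy)
                    return 0;          /* someone else finished the job */

            return reserve_locked();   /* nonzero: contended, retry later */
    }
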
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
index c72f1c0b5e63..111d956d8e7d 100644
--- a/drivers/gpu/vga/vgaarb.c
+++ b/drivers/gpu/vga/vgaarb.c
@@ -465,31 +465,29 @@ static void vga_arbiter_check_bridge_sharing(struct vga_device *vgadev)
465 while (new_bus) { 465 while (new_bus) {
466 new_bridge = new_bus->self; 466 new_bridge = new_bus->self;
467 467
468 if (new_bridge) { 468 /* go through list of devices already registered */
469 /* go through list of devices already registered */ 469 list_for_each_entry(same_bridge_vgadev, &vga_list, list) {
470 list_for_each_entry(same_bridge_vgadev, &vga_list, list) { 470 bus = same_bridge_vgadev->pdev->bus;
471 bus = same_bridge_vgadev->pdev->bus; 471 bridge = bus->self;
472 bridge = bus->self; 472
473 473 /* see if they share a bridge with this device */
474 /* see if they share a bridge with this device */ 474 if (new_bridge == bridge) {
475 if (new_bridge == bridge) { 475 /* if their direct parent bridge is the same
476 /* if their direct parent bridge is the same 476 as any bridge of this device then it can't be used
477 as any bridge of this device then it can't be used 477 for that device */
478 for that device */ 478 same_bridge_vgadev->bridge_has_one_vga = false;
479 same_bridge_vgadev->bridge_has_one_vga = false; 479 }
480 }
481 480
482 /* now iterate the previous device's bridge hierarchy */ 481 /* now iterate the previous device's bridge hierarchy */
483 /* if the new device's parent bridge is in the other device's 482 /* if the new device's parent bridge is in the other device's
484 hierarchy then we can't use it to control this device */ 483 hierarchy then we can't use it to control this device */
485 while (bus) { 484 while (bus) {
486 bridge = bus->self; 485 bridge = bus->self;
487 if (bridge) { 486 if (bridge) {
488 if (bridge == vgadev->pdev->bus->self) 487 if (bridge == vgadev->pdev->bus->self)
489 vgadev->bridge_has_one_vga = false; 488 vgadev->bridge_has_one_vga = false;
490 }
491 bus = bus->parent;
492 } 489 }
490 bus = bus->parent;
493 } 491 }
494 } 492 }
495 new_bus = new_bus->parent; 493 new_bus = new_bus->parent;
@@ -993,14 +991,20 @@ static ssize_t vga_arb_write(struct file *file, const char __user * buf,
993 uc = &priv->cards[i]; 991 uc = &priv->cards[i];
994 } 992 }
995 993
996 if (!uc) 994 if (!uc) {
997 return -EINVAL; 995 ret_val = -EINVAL;
996 goto done;
997 }
998 998
999 if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) 999 if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0) {
1000 return -EINVAL; 1000 ret_val = -EINVAL;
1001 goto done;
1002 }
1001 1003
1002 if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) 1004 if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0) {
1003 return -EINVAL; 1005 ret_val = -EINVAL;
1006 goto done;
1007 }
1004 1008
1005 vga_put(pdev, io_state); 1009 vga_put(pdev, io_state);
1006 1010
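
vga_arb_write() allocates its command buffer at entry and frees it at the done: label, so the three early returns replaced above leaked it; routing every failure through ret_val and goto done fixes that. The shape of the fix (names hypothetical, kernel allocator swapped for malloc):

    #include <errno.h>
    #include <stdlib.h>

    static long parse_release(int io_cnt, int want_io)
    {
            long ret_val = 0;
            char *kbuf = malloc(64);   /* command buffer, freed at done: */

            if (!kbuf)
                    return -ENOMEM;

            if (want_io && io_cnt == 0) {
                    ret_val = -EINVAL; /* was: return -EINVAL, leaking kbuf */
                    goto done;
            }
            /* ... vga_put() and the rest of the release path ... */
    done:
            free(kbuf);
            return ret_val;
    }
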